Merge trunk into HDFS-1073.

Resolved several conflicts due to the merge of HDFS-2149 and HDFS-2212.
Changes made during resolution:
- move the writing of the transaction ID out of EditLogOutputStream into
  FSEditLogOp.Writer to match trunk's organization (see the sketch after
  this list)
- remove the JSPOOL-related FSEditLogOp subclasses, add LogSegmentOp
  subclasses
- modify TestEditLogJournalFailures to stop using streams after the
  simulated halt, since newer, stricter assertions caused these writes to
  fail
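
For context, here is a minimal, hypothetical Java sketch of the first
change: the op writer owns the record framing (opcode, transaction ID,
op-specific fields), so individual EditLogOutputStream implementations no
longer write the txid themselves. Class and method names (SketchEditLogOp,
SketchWriter, writeOp) are illustrative placeholders, not the actual trunk
signatures, and the sketch deliberately omits everything else the real
classes do.

    import java.io.DataOutputStream;
    import java.io.IOException;

    // Placeholder op type: each op serializes only its own payload fields.
    abstract class SketchEditLogOp {
      final byte opCode;
      long txid;                 // assigned by the edit log before writing

      SketchEditLogOp(byte opCode) { this.opCode = opCode; }

      abstract void writeFields(DataOutputStream out) throws IOException;
    }

    // Placeholder writer: owns the per-record framing, so every journal
    // implementation gets identical serialization without duplicating it.
    class SketchWriter {
      private final DataOutputStream out;

      SketchWriter(DataOutputStream out) { this.out = out; }

      void writeOp(SketchEditLogOp op) throws IOException {
        out.writeByte(op.opCode);   // opcode first
        out.writeLong(op.txid);     // then the transaction ID
        op.writeFields(out);        // then the op-specific fields
      }
    }

Centralizing the framing in the writer means every output stream sees the
same record layout without each one carrying its own txid-writing logic.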


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1073@1152128 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/hdfs/CHANGES.txt b/hdfs/CHANGES.txt
index 3070cd9..2f1d6e6 100644
--- a/hdfs/CHANGES.txt
+++ b/hdfs/CHANGES.txt
@@ -9,6 +9,8 @@
 
     HDFS-1536. Improve HDFS WebUI. (hairong)
 
+    HDFS-2210. Remove hdfsproxy. (eli)
+
   NEW FEATURES
 
     HDFS-1359. Add BlockPoolID to Block. (suresh)
@@ -600,6 +602,22 @@
     cause. (Ravi Prakash via atm)
 
     HDFS-2180. Refactor NameNode HTTP server into new class. (todd)
+    
+    HDFS-2198. Remove hardcoded configuration keys. (suresh)
+
+    HDFS-2149. Move EditLogOp serialization formats into FsEditLogOp
+    implementations. (Ivan Kelly via todd)
+
+    HDFS-2191. Move datanodeMap from FSNamesystem to DatanodeManager.
+    (szetszwo)
+
+    HDFS-2200. Change FSNamesystem.LOG to package private. (szetszwo)
+
+    HDFS-2195. Refactor StorageDirectory to not be a non-static inner class.
+    (todd via eli)
+
+    HDFS-2212. Refactor double-buffering code out of EditLogOutputStreams.
+    (todd via eli)
 
   OPTIMIZATIONS
 
@@ -1370,6 +1388,9 @@
     HDFS-2071. Use of isConnected() in DataXceiver is invalid. (Kihwal Lee
     via todd)
 
+    HDFS-1981. NameNode does not saveNamespace() when editsNew is empty.
+    (Uma Maheswara Rao G via shv)
+
 Release 0.21.1 - Unreleased
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
 
diff --git a/hdfs/build.xml b/hdfs/build.xml
index 311195a..3660120 100644
--- a/hdfs/build.xml
+++ b/hdfs/build.xml
@@ -1397,7 +1397,6 @@
         <exclude name="src/c++/libhdfs/install-sh" />
         <exclude name="src/c++/libhdfs/ltmain.sh" />
         <exclude name="src/c++/libhdfs/missing" />
-        <exclude name="src/contrib/hdfsproxy/src/test/resources/" />
         <exclude name="src/test/checkstyle-noframes-sorted.xsl" />
         <exclude name="src/test/checkstyle.xml" />
         <exclude name="src/test/findbugsExcludeFile.xml" />
diff --git a/hdfs/src/contrib/build.xml b/hdfs/src/contrib/build.xml
index 5326d37..e69640a 100644
--- a/hdfs/src/contrib/build.xml
+++ b/hdfs/src/contrib/build.xml
@@ -48,12 +48,6 @@
     <subant target="test">
       <fileset dir="." includes="fuse-dfs/build.xml"/>
     </subant> 
-
-    <!-- hdfsproxy tests failing due to HDFS-1666
-    <subant target="test">
-      <fileset dir="." includes="hdfsproxy/build.xml"/>
-    </subant>
-      -->
   </target>
   
   
diff --git a/hdfs/src/contrib/hdfsproxy/README b/hdfs/src/contrib/hdfsproxy/README
deleted file mode 100644
index abf0dc6..0000000
--- a/hdfs/src/contrib/hdfsproxy/README
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-HDFS Proxy is a proxy server through which a hadoop client (through HSFTP) or a standard
-HTTPS client (wget, curl, etc) can talk to a hadoop server and more importantly pull data
-from the sever. It put an access control layer in front of hadoop namenode server and extends
-its functionalities to allow hadoop cross-version data transfer.
-
-HDFSPROXY can be configured/started via either Jetty or Tomcat with different supporting features.
-
-A) With Jetty-based Installation, supporting features include:
-> Single Hadoop source cluster data transfer
-> Single Hadoop version data transfer
-> Authenticate users via user SSL certificates with ProxyFilter installed
-> Enforce access control based on configuration files.
-
-B) With Tomcat-based Installation, supporting features include:
-> Multiple Hadoop source cluster data transfer
-> Multiple Hadoop version data transfer
-> Authenticate users via user SSL certificates with ProxyFilter installed
-> Authentication and authorization via LDAP with LdapIpDirFilter installed
-> Access control based on configuration files if ProxyFilter is installed.
-> Access control based on LDAP entries if LdapIpDirFilter is installed.
-> Standard HTTPS Get Support for file transfer
-
-The detailed configuration/set-up guide is in the Forrest 
-documentation, which can be found at $HADOOP_PREFIX/docs. In order to build the 
-documentation on your own from source please use the following command in 
-the downloaded source folder:
-
-ant docs -Dforrest.home=path to forrest -Djava5.home= path to jdk5. 
-
-The documentation so built would be under $HADOOP_PREFIX/build/docs
diff --git a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy b/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy
deleted file mode 100755
index 18be9d4..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy
+++ /dev/null
@@ -1,170 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# The HdfsProxy command script
-#
-# Environment Variables
-#
-#   JAVA_HOME        The java implementation to use.  Overrides JAVA_HOME.
-#
-#   HDFSPROXY_CLASSPATH Extra Java CLASSPATH entries.
-#
-#   HDFSPROXY_HEAPSIZE  The maximum amount of heap to use, in MB. 
-#                    Default is 1000.
-#
-#   HDFSPROXY_OPTS      Extra Java runtime options.
-#   
-#   HDFSPROXY_NAMENODE_OPTS       These options are added to HDFSPROXY_OPTS 
-#   HDFSPROXY_CLIENT_OPTS         when the respective command is run.
-#   HDFSPROXY_{COMMAND}_OPTS etc  HDFSPROXY_JT_OPTS applies to JobTracker 
-#                              for e.g.  HDFSPROXY_CLIENT_OPTS applies to 
-#                              more than one command (fs, dfs, fsck, 
-#                              dfsadmin etc)  
-#
-#   HDFSPROXY_CONF_DIR  Alternate conf dir. Default is ${HDFSPROXY_HOME}/conf.
-#
-#   HDFSPROXY_ROOT_LOGGER The root appender. Default is INFO,console
-#
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hdfsproxy-config.sh
-
-cygwin=false
-case "`uname`" in
-CYGWIN*) cygwin=true;;
-esac
-
-if [ -f "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh" ]; then
-  . "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh"
-fi
-
-# some Java parameters
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-  
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
-  exit 1
-fi
-
-JAVA=$JAVA_HOME/bin/java
-JAVA_HEAP_MAX=-Xmx1000m 
-
-# check envvars which might override default args
-if [ "$HDFSPROXY_HEAPSIZE" != "" ]; then
-  #echo "run with heapsize $HDFSPROXY_HEAPSIZE"
-  JAVA_HEAP_MAX="-Xmx""$HDFSPROXY_HEAPSIZE""m"
-  #echo $JAVA_HEAP_MAX
-fi
-
-# CLASSPATH initially contains $HDFSPROXY_CONF_DIR
-CLASSPATH="${HDFSPROXY_CONF_DIR}"
-CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
-
-# for developers, add HdfsProxy classes to CLASSPATH
-if [ -d "$HDFSPROXY_HOME/build/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/classes
-fi
-if [ -d "$HDFSPROXY_HOME/build/web/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/web
-fi
-if [ -d "$HDFSPROXY_HOME/build/test/hdfs/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/test/hdfs/classes
-fi
-
-# so that filenames w/ spaces are handled correctly in loops below
-IFS=
-
-# for releases, add hdfsproxy jar & webapps to CLASSPATH
-if [ -d "$HDFSPROXY_HOME/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME
-fi
-for f in $HDFSPROXY_HOME/hdfsproxy-*.jar; do
-  CLASSPATH=${CLASSPATH}:$f;
-done
-
-# add libs to CLASSPATH
-if [ -d "$HDFSPROXY_HOME/lib" ]; then
-	for f in $HDFSPROXY_HOME/lib/*.jar; do
-	  CLASSPATH=${CLASSPATH}:$f;
-	done
-fi
-
-if [ -d "$HDFSPROXY_HOME/../../" ]; then
-	for f in $HDFSPROXY_HOME/../../*.jar; do
-    CLASSPATH=${CLASSPATH}:$f;
-  done
-fi
-if [ -d "$HDFSPROXY_HOME/../../lib" ]; then
-	for f in $HDFSPROXY_HOME/../../lib/*.jar; do
-    CLASSPATH=${CLASSPATH}:$f;
-  done
-fi
-if [ -d "$HDFSPROXY_HOME/../../lib/jsp-2.1" ]; then
-	for f in $HDFSPROXY_HOME/../../lib/jsp-2.1/*.jar; do
-    CLASSPATH=${CLASSPATH}:$f;
-  done
-fi
-
-
-# add user-specified CLASSPATH last
-if [ "$HDFSPROXY_CLASSPATH" != "" ]; then
-  CLASSPATH=${CLASSPATH}:${HDFSPROXY_CLASSPATH}
-fi
-
-# default log directory & file
-if [ "$HDFSPROXY_LOG_DIR" = "" ]; then
-  HDFSPROXY_LOG_DIR="$HDFSPROXY_HOME/logs"
-fi
-if [ "$HDFSPROXY_LOGFILE" = "" ]; then
-  HDFSPROXY_LOGFILE='hdfsproxy.log'
-fi
-
-# restore ordinary behaviour
-unset IFS
-
-# figure out which class to run
-CLASS='org.apache.hadoop.hdfsproxy.HdfsProxy'
-
-# cygwin path translation
-if $cygwin; then
-  CLASSPATH=`cygpath -p -w "$CLASSPATH"`
-  HDFSPROXY_HOME=`cygpath -d "$HDFSPROXY_HOME"`
-  HDFSPROXY_LOG_DIR=`cygpath -d "$HDFSPROXY_LOG_DIR"`
-fi
-
-# cygwin path translation
-if $cygwin; then
-  JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
-fi
-
-HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.log.dir=$HDFSPROXY_LOG_DIR"
-HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.log.file=$HDFSPROXY_LOGFILE"
-HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.home.dir=$HDFSPROXY_HOME"
-HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.id.str=$HDFSPROXY_IDENT_STRING"
-HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.root.logger=${HDFSPROXY_ROOT_LOGGER:-INFO,console}"
-if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-  HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-fi  
-
-# run it
-exec "$JAVA" $JAVA_HEAP_MAX $HDFSPROXY_OPTS -classpath "$CLASSPATH" $CLASS "$@"
diff --git a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-config.sh b/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-config.sh
deleted file mode 100755
index 8fe6aac..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-config.sh
+++ /dev/null
@@ -1,67 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# included in all the hadoop scripts with source command
-# should not be executable directly
-# also should not be passed any arguments, since we need original $*
-
-# resolve links - $0 may be a softlink
-
-this="$0"
-while [ -h "$this" ]; do
-  ls=`ls -ld "$this"`
-  link=`expr "$ls" : '.*-> \(.*\)$'`
-  if expr "$link" : '.*/.*' > /dev/null; then
-    this="$link"
-  else
-    this=`dirname "$this"`/"$link"
-  fi
-done
-
-# convert relative path to absolute path
-bin=`dirname "$this"`
-script=`basename "$this"`
-bin=`cd "$bin"; pwd`
-this="$bin/$script"
-
-# the root of the HdfsProxy installation
-export HDFSPROXY_HOME=`dirname "$this"`/..
-
-#check to see if the conf dir is given as an optional argument
-if [ $# -gt 1 ]
-then
-    if [ "--config" = "$1" ]
-	  then
-	      shift
-	      confdir=$1
-	      shift
-	      HDFSPROXY_CONF_DIR=$confdir
-    fi
-fi
- 
-# Allow alternate conf dir location.
-HDFSPROXY_CONF_DIR="${HDFSPROXY_CONF_DIR:-$HDFSPROXY_HOME/conf}"
-
-#check to see it is specified whether to use the slaves file
-if [ $# -gt 1 ]
-then
-    if [ "--hosts" = "$1" ]
-    then
-        shift
-        slavesfile=$1
-        shift
-        export HDFSPROXY_SLAVES="${HDFSPROXY_CONF_DIR}/$slavesfile"
-    fi
-fi
diff --git a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh b/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh
deleted file mode 100755
index 6d5a7524..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-daemon.sh
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Runs a HdfsProxy as a daemon.
-#
-# Environment Variables
-#
-#   HDFSPROXY_CONF_DIR  Alternate conf dir. Default is ${HDFSPROXY_HOME}/conf.
-#   HDFSPROXY_LOG_DIR   Where log files are stored.  PWD by default.
-#   HDFSPROXY_MASTER    host:path where hdfsproxy code should be rsync'd from
-#   HDFSPROXY_PID_DIR   The pid files are stored. /tmp by default.
-#   HDFSPROXY_IDENT_STRING   A string representing this instance of hdfsproxy. $USER by default
-#   HDFSPROXY_NICENESS The scheduling priority for daemons. Defaults to 0.
-##
-
-usage="Usage: hdfsproxy-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) "
-
-# if no args specified, show usage
-if [ $# -le 1 ]; then
-  echo $usage
-  exit 1
-fi
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hdfsproxy-config.sh
-
-# get arguments
-startStop=$1
-shift
-
-hdfsproxy_rotate_log ()
-{
-    log=$1;
-    num=5;
-    if [ -n "$2" ]; then
-	num=$2
-    fi
-    if [ -f "$log" ]; then # rotate logs
-	while [ $num -gt 1 ]; do
-	    prev=`expr $num - 1`
-	    [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
-	    num=$prev
-	done
-	mv "$log" "$log.$num";
-    fi
-}
-
-if [ -f "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh" ]; then
-  . "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh"
-fi
-
-# get log directory
-if [ "$HDFSPROXY_LOG_DIR" = "" ]; then
-  export HDFSPROXY_LOG_DIR="$HDFSPROXY_HOME/logs"
-fi
-mkdir -p "$HDFSPROXY_LOG_DIR"
-
-if [ "$HDFSPROXY_PID_DIR" = "" ]; then
-  HDFSPROXY_PID_DIR=/tmp
-fi
-
-if [ "$HDFSPROXY_IDENT_STRING" = "" ]; then
-  export HDFSPROXY_IDENT_STRING="$USER"
-fi
-
-# some variables
-export HDFSPROXY_LOGFILE=hdfsproxy-$HDFSPROXY_IDENT_STRING-$HOSTNAME.log
-export HDFSPROXY_ROOT_LOGGER="INFO,DRFA"
-log=$HDFSPROXY_LOG_DIR/hdfsproxy-$HDFSPROXY_IDENT_STRING-$HOSTNAME.out
-pid=$HDFSPROXY_PID_DIR/hdfsproxy-$HDFSPROXY_IDENT_STRING.pid
-
-# Set default scheduling priority
-if [ "$HDFSPROXY_NICENESS" = "" ]; then
-    export HDFSPROXY_NICENESS=0
-fi
-
-case $startStop in
-
-  (start)
-
-    mkdir -p "$HDFSPROXY_PID_DIR"
-
-    if [ -f $pid ]; then
-      if kill -0 `cat $pid` > /dev/null 2>&1; then
-        echo hdfsproxy running as process `cat $pid`.  Stop it first.
-        exit 1
-      fi
-    fi
-
-    if [ "$HDFSPROXY_MASTER" != "" ]; then
-      echo rsync from $HDFSPROXY_MASTER
-      rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HDFSPROXY_MASTER/ "$HDFSPROXY_HOME"
-    fi
-
-    hdfsproxy_rotate_log $log
-    echo starting hdfsproxy, logging to $log
-    cd "$HDFSPROXY_HOME"
-    nohup nice -n $HDFSPROXY_NICENESS "$HDFSPROXY_HOME"/bin/hdfsproxy --config $HDFSPROXY_CONF_DIR "$@" > "$log" 2>&1 < /dev/null &
-    echo $! > $pid
-    sleep 1; head "$log"
-    ;;
-          
-  (stop)
-
-    if [ -f $pid ]; then
-      if kill -0 `cat $pid` > /dev/null 2>&1; then
-        echo stopping hdfsproxy
-        kill `cat $pid`
-      else
-        echo no hdfsproxy to stop
-      fi
-    else
-      echo no hdfsproxy to stop
-    fi
-    ;;
-
-  (*)
-    echo $usage
-    exit 1
-    ;;
-
-esac
-
-
diff --git a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-daemons.sh b/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-daemons.sh
deleted file mode 100755
index 7dd8568..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-daemons.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Run a HdfsProxy command on all slave hosts.
-
-usage="Usage: hdfsproxy-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] "
-
-# if no args specified, show usage
-if [ $# -le 1 ]; then
-  echo $usage
-  exit 1
-fi
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. $bin/hdfsproxy-config.sh
-
-exec "$bin/hdfsproxy-slaves.sh" --config $HDFSPROXY_CONF_DIR cd "$HDFSPROXY_HOME" \; "$bin/hdfsproxy-daemon.sh" --config $HDFSPROXY_CONF_DIR "$@"
diff --git a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh b/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh
deleted file mode 100755
index db54bd5..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-slaves.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Run a shell command on all slave hosts.
-#
-# Environment Variables
-#
-#   HDFSPROXY_SLAVES    File naming remote hosts.
-#     Default is ${HDFSPROXY_CONF_DIR}/hdfsproxy-hosts.
-#   HDFSPROXY_CONF_DIR  Alternate conf dir. Default is ${HDFSPROXY_HOME}/conf.
-#   HDFSPROXY_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
-#   HDFSPROXY_SSH_OPTS Options passed to ssh when running remote commands.
-##
-
-usage="Usage: hdfsproxy-slaves.sh [--config confdir] command..."
-
-# if no args specified, show usage
-if [ $# -le 0 ]; then
-  echo $usage
-  exit 1
-fi
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hdfsproxy-config.sh
-
-# If the slaves file is specified in the command line,
-# then it takes precedence over the definition in 
-# hdfsproxy-env.sh. Save it here.
-HOSTLIST=$HDFSPROXY_SLAVES
-
-if [ -f "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh" ]; then
-  . "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh"
-fi
-
-if [ "$HOSTLIST" = "" ]; then
-  if [ "$HDFSPROXY_SLAVES" = "" ]; then
-    export HOSTLIST="${HDFSPROXY_CONF_DIR}/hdfsproxy-hosts"
-  else
-    export HOSTLIST="${HDFSPROXY_SLAVES}"
-  fi
-fi
-
-for slave in `cat "$HOSTLIST"`; do
- ssh $HDFSPROXY_SSH_OPTS $slave $"${@// /\\ }" \
-   2>&1 | sed "s/^/$slave: /" &
- if [ "$HDFSPROXY_SLAVE_SLEEP" != "" ]; then
-   sleep $HDFSPROXY_SLAVE_SLEEP
- fi
-done
-
-wait
diff --git a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-tomcat-server.sh b/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-tomcat-server.sh
deleted file mode 100644
index 78debe6..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-tomcat-server.sh
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Runs a HdfsProxy as a daemon.
-#
-# Environment Variables
-#
-#   HDFSPROXY_CONF_DIR  Alternate conf dir. Default is ${HDFSPROXY_HOME}/conf.
-#   HDFSPROXY_MASTER    host:path where hdfsproxy code should be rsync'd from
-#   HDFSPROXY_PID_DIR   The pid files are stored. /tmp by default.
-#   HDFSPROXY_IDENT_STRING   A string representing this instance of hdfsproxy. $USER by default
-#   HDFSPROXY_NICENESS The scheduling priority for daemons. Defaults to 0.
-#		TOMCAT_HOME_DIR tomcat home directory.
-##
-
-usage="Usage: hdfsproxy-tomcat-server.sh [--config <conf-dir>] [--hosts hostlistfile] (start|stop) "
-
-# if no args specified, show usage
-if [ $# -le 1 ]; then
-  echo $usage
-  exit 1
-fi
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hdfsproxy-config.sh
-
-# get arguments
-startStop=$1
-shift
-
-
-if [ -f "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh" ]; then
-  . "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh"
-fi
-
-
-if [ "$HDFSPROXY_IDENT_STRING" = "" ]; then
-  export HDFSPROXY_IDENT_STRING="$USER"
-fi
-
-
-# Set default scheduling priority
-if [ "$HDFSPROXY_NICENESS" = "" ]; then
-    export HDFSPROXY_NICENESS=0
-fi
-
-case $startStop in
-
-  (start)
-    if [ "$HDFSPROXY_MASTER" != "" ]; then
-      echo rsync from $HDFSPROXY_MASTER
-      rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HDFSPROXY_MASTER/ "$HDFSPROXY_HOME"
-    fi
-
-    echo starting hdfsproxy tomcat server
-    cd "$HDFSPROXY_HOME"
-    nohup nice -n $HDFSPROXY_NICENESS "$TOMCAT_HOME_DIR"/bin/startup.sh >& /dev/null &
-    sleep 1
-    ;;
-          
-  (stop)
-
-    echo stopping hdfsproxy tomcat server
-    cd "$HDFSPROXY_HOME"
-    nohup nice -n $HDFSPROXY_NICENESS "$TOMCAT_HOME_DIR"/bin/shutdown.sh >& /dev/null &
-    ;;
-
-  (*)
-    echo $usage
-    exit 1
-    ;;
-
-esac
-
-
diff --git a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-tomcat-servers.sh b/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-tomcat-servers.sh
deleted file mode 100644
index 33675a8..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-tomcat-servers.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Run a HdfsProxy command on all slave hosts.
-
-usage="Usage: hdfsproxy-tomcat-servers.sh [--config confdir] [--hosts hostlistfile] [start|stop] "
-
-# if no args specified, show usage
-if [ $# -le 1 ]; then
-  echo $usage
-  exit 1
-fi
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. $bin/hdfsproxy-config.sh
-
-exec "$bin/hdfsproxy-tomcat-slaves.sh" --config $HDFSPROXY_CONF_DIR cd "$HDFSPROXY_HOME" \; "$bin/hdfsproxy-tomcat-server.sh" --config $HDFSPROXY_CONF_DIR "$@"
diff --git a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-tomcat-slaves.sh b/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-tomcat-slaves.sh
deleted file mode 100644
index 2a8e68e..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/hdfsproxy-tomcat-slaves.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Run a shell command on all slave hosts.
-#
-# Environment Variables
-#
-#   HDFSPROXY_SLAVES    File naming remote hosts.
-#     Default is ${HDFSPROXY_CONF_DIR}/hdfsproxy-hosts.
-#   HDFSPROXY_CONF_DIR  Alternate conf dir. Default is ${HDFSPROXY_HOME}/conf.
-#   HDFSPROXY_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
-#   HDFSPROXY_SSH_OPTS Options passed to ssh when running remote commands.
-##
-
-usage="Usage: hdfsproxy-tomcat-slaves.sh [--config confdir] command..."
-
-# if no args specified, show usage
-if [ $# -le 0 ]; then
-  echo $usage
-  exit 1
-fi
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hdfsproxy-config.sh
-
-# If the slaves file is specified in the command line,
-# then it takes precedence over the definition in 
-# hdfsproxy-env.sh. Save it here.
-HOSTLIST=$HDFSPROXY_SLAVES
-
-if [ -f "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh" ]; then
-  . "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh"
-fi
-
-if [ "$HOSTLIST" = "" ]; then
-  if [ "$HDFSPROXY_SLAVES" = "" ]; then
-    export HOSTLIST="${HDFSPROXY_CONF_DIR}/hdfsproxy-hosts"
-  else
-    export HOSTLIST="${HDFSPROXY_SLAVES}"
-  fi
-fi
-
-for slave in `cat "$HOSTLIST"`; do
- ssh $HDFSPROXY_SSH_OPTS $slave $"${@// /\\ }" \
-   2>&1 | sed "s/^/$slave: /" & 
- if [ "$HDFSPROXY_SLAVE_SLEEP" != "" ]; then
-   sleep $HDFSPROXY_SLAVE_SLEEP
- fi
-done
-
-wait
diff --git a/hdfs/src/contrib/hdfsproxy/bin/proxy-util b/hdfs/src/contrib/hdfsproxy/bin/proxy-util
deleted file mode 100644
index 22094e0..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/proxy-util
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# The Proxy command utility script
-#
-# Environment Variables
-#
-#   JAVA_HOME        The java implementation to use.  Overrides JAVA_HOME.
-#
-#   HDFSPROXY_CLASSPATH Extra Java CLASSPATH entries.
-#
-#   HDFSPROXY_HEAPSIZE  The maximum amount of heap to use, in MB. 
-#                    Default is 1000.
-#
-#   HDFSPROXY_OPTS      Extra Java runtime options.
-#   
-#   HDFSPROXY_NAMENODE_OPTS       These options are added to HDFSPROXY_OPTS 
-#   HDFSPROXY_CLIENT_OPTS         when the respective command is run.
-#   HDFSPROXY_{COMMAND}_OPTS etc  HDFSPROXY_JT_OPTS applies to JobTracker 
-#                              for e.g.  HDFSPROXY_CLIENT_OPTS applies to 
-#                              more than one command (fs, dfs, fsck, 
-#                              dfsadmin etc)  
-#
-#   HDFSPROXY_CONF_DIR  Alternate conf dir. Default is ${HDFSPROXY_HOME}/conf.
-#
-#   HDFSPROXY_ROOT_LOGGER The root appender. Default is INFO,console
-#
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hdfsproxy-config.sh
-
-cygwin=false
-case "`uname`" in
-CYGWIN*) cygwin=true;;
-esac
-
-if [ -f "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh" ]; then
-  . "${HDFSPROXY_CONF_DIR}/hdfsproxy-env.sh"
-fi
-
-# some Java parameters
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-  
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
-  exit 1
-fi
-
-JAVA=$JAVA_HOME/bin/java
-JAVA_HEAP_MAX=-Xmx1000m 
-
-# check envvars which might override default args
-if [ "$HDFSPROXY_HEAPSIZE" != "" ]; then
-  #echo "run with heapsize $HDFSPROXY_HEAPSIZE"
-  JAVA_HEAP_MAX="-Xmx""$HDFSPROXY_HEAPSIZE""m"
-  #echo $JAVA_HEAP_MAX
-fi
-
-# CLASSPATH initially contains $HDFSPROXY_CONF_DIR
-CLASSPATH="${HADOOP_CONF_DIR}"
-CLASSPATH="${CLASSPATH}:${HDFSPROXY_CONF_DIR}"
-CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
-
-# for developers, add HdfsProxy classes to CLASSPATH
-if [ -d "$HDFSPROXY_HOME/build/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/classes
-fi
-if [ -d "$HDFSPROXY_HOME/build/web/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/web
-fi
-if [ -d "$HDFSPROXY_HOME/build/test/hdfs/classes" ]; then
-  CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME/build/test/hdfs/classes
-fi
-
-# so that filenames w/ spaces are handled correctly in loops below
-IFS=
-
-# for releases, add hdfsproxy jar & webapps to CLASSPATH
-if [ -d "$HDFSPROXY_HOME/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HDFSPROXY_HOME
-fi
-for f in $HDFSPROXY_HOME/hdfsproxy-*.jar; do
-  CLASSPATH=${CLASSPATH}:$f;
-done
-
-# add libs to CLASSPATH
-for f in $HDFSPROXY_HOME/lib/*.jar; do
-  CLASSPATH=${CLASSPATH}:$f;
-done
-
-# add user-specified CLASSPATH last
-if [ "$HDFSPROXY_CLASSPATH" != "" ]; then
-  CLASSPATH=${CLASSPATH}:${HDFSPROXY_CLASSPATH}
-fi
-
-# default log directory & file
-if [ "$HDFSPROXY_LOG_DIR" = "" ]; then
-  HDFSPROXY_LOG_DIR="$HDFSPROXY_HOME/logs"
-fi
-if [ "$HDFSPROXY_LOGFILE" = "" ]; then
-  HDFSPROXY_LOGFILE='proxy-util.log'
-fi
-
-# restore ordinary behaviour
-unset IFS
-
-# figure out which class to run
-CLASS='org.apache.hadoop.hdfsproxy.ProxyUtil'
-
-# cygwin path translation
-if $cygwin; then
-  CLASSPATH=`cygpath -p -w "$CLASSPATH"`
-  HDFSPROXY_HOME=`cygpath -d "$HDFSPROXY_HOME"`
-  HDFSPROXY_LOG_DIR=`cygpath -d "$HDFSPROXY_LOG_DIR"`
-fi
-
-# cygwin path translation
-if $cygwin; then
-  JAVA_LIBRARY_PATH=`cygpath -p "$JAVA_LIBRARY_PATH"`
-fi
-
-HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.log.dir=$HDFSPROXY_LOG_DIR"
-HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.log.file=$HDFSPROXY_LOGFILE"
-HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.home.dir=$HDFSPROXY_HOME"
-HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.id.str=$HDFSPROXY_IDENT_STRING"
-HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Dhdfsproxy.root.logger=${HDFSPROXY_ROOT_LOGGER:-INFO,console}"
-if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-  HDFSPROXY_OPTS="$HDFSPROXY_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-fi  
-
-# run it
-exec "$JAVA" $JAVA_HEAP_MAX $HDFSPROXY_OPTS -classpath "$CLASSPATH" $CLASS "$@"
diff --git a/hdfs/src/contrib/hdfsproxy/bin/start-hdfsproxy-tomcat.sh b/hdfs/src/contrib/hdfsproxy/bin/start-hdfsproxy-tomcat.sh
deleted file mode 100644
index 7b208ee..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/start-hdfsproxy-tomcat.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Start hdfsproxy tomcat servers.
-# Run this on master node.
-
-usage="Usage: start-hdfsproxy-tomcat.sh"
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hdfsproxy-config.sh
-
-# get arguments
-if [ $# -ge 1 ]; then
-  echo $usage
-  exit 1
-fi
-
-# start hdfsproxy tomcat servers
-"$bin"/hdfsproxy-tomcat-servers.sh --config $HDFSPROXY_CONF_DIR --hosts hdfsproxy-hosts start
diff --git a/hdfs/src/contrib/hdfsproxy/bin/start-hdfsproxy.sh b/hdfs/src/contrib/hdfsproxy/bin/start-hdfsproxy.sh
deleted file mode 100755
index 2592d9c..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/start-hdfsproxy.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Start hdfsproxy daemons.
-# Run this on master node.
-
-usage="Usage: start-hdfsproxy.sh"
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hdfsproxy-config.sh
-
-# get arguments
-if [ $# -ge 1 ]; then
-  echo $usage
-  exit 1
-fi
-
-# start hdfsproxy daemons
-# "$bin"/hdfsproxy-daemon.sh --config $HDFSPROXY_CONF_DIR start
-"$bin"/hdfsproxy-daemons.sh --config $HDFSPROXY_CONF_DIR --hosts hdfsproxy-hosts start
diff --git a/hdfs/src/contrib/hdfsproxy/bin/stop-hdfsproxy-tomcat.sh b/hdfs/src/contrib/hdfsproxy/bin/stop-hdfsproxy-tomcat.sh
deleted file mode 100644
index c03badc5..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/stop-hdfsproxy-tomcat.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Stop hdfsproxy tomcat servers.  Run this on master node.
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hdfsproxy-config.sh
-
-# "$bin"/hdfsproxy-daemon.sh --config $HDFSPROXY_CONF_DIR stop
-"$bin"/hdfsproxy-tomcat-servers.sh --config $HDFSPROXY_CONF_DIR --hosts hdfsproxy-hosts stop
-
diff --git a/hdfs/src/contrib/hdfsproxy/bin/stop-hdfsproxy.sh b/hdfs/src/contrib/hdfsproxy/bin/stop-hdfsproxy.sh
deleted file mode 100755
index 78089e3..0000000
--- a/hdfs/src/contrib/hdfsproxy/bin/stop-hdfsproxy.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Stop hdfsproxy daemons.  Run this on master node.
-
-bin=`dirname "$0"`
-bin=`cd "$bin"; pwd`
-
-. "$bin"/hdfsproxy-config.sh
-
-# "$bin"/hdfsproxy-daemon.sh --config $HDFSPROXY_CONF_DIR stop
-"$bin"/hdfsproxy-daemons.sh --config $HDFSPROXY_CONF_DIR --hosts hdfsproxy-hosts stop
-
diff --git a/hdfs/src/contrib/hdfsproxy/build.xml b/hdfs/src/contrib/hdfsproxy/build.xml
deleted file mode 100644
index 7119c50..0000000
--- a/hdfs/src/contrib/hdfsproxy/build.xml
+++ /dev/null
@@ -1,492 +0,0 @@
-<?xml version="1.0" ?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project name="hdfsproxy" default="jar" xmlns:ivy="antlib:org.apache.ivy.ant">
-	<property name="hdfsproxyVersion" value="2.0"/>
-	<property name="final.name" value="${ant.project.name}-${hdfsproxyVersion}"/>
-	<property name="javac.debug" value="on"/>
-	<property name="javac.optimize" value="on"/>
-	<import file="../build-contrib.xml"/>
-	
-	<property name="bin.dir" value="${basedir}/bin"/>
-	<property name="lib.dir" value="${basedir}/lib"/>
-	<property name="hadoop.jars.dir" value="${basedir}/hadoopjars"/>
-	
-	<property name="docs.dir" value="${basedir}/docs"/>
-	<property name="test.build.dir" value="${build.dir}/test"/>
-	<property name="test.build.classes" value="${test.build.dir}/classes"/>	
-	<property name="src.test.resources" value="${basedir}/src/test/resources"/>
-	<property name="ssl.keystore.proxy" value="${src.test.resources}/ssl-keys/proxy.keystore"/>
-	<property name="ssl.keystore.client" value="${src.test.resources}/ssl-keys/client.keystore"/>
-	<property name="ssl.client.cert" value="${src.test.resources}/ssl-keys/test.crt"/>
-	<property name="proxy.conf.test" value="${src.test.resources}/proxy-config"/>
-	<property name="tomcat.conf.test" value="${src.test.resources}/tomcat-config"/>
-	<property name="target.dir" value="${build.dir}/target"/>
-	<property name="logs.dir" value="${target.dir}/logs"/>
-	<property name="reports.dir" value="${target.dir}/reports"/>
-	<property name="tomcatconfig.dir" value="${target.dir}/tomcat-config"/>
-	<property name="tomcat.container.id" value="tomcat5x"/>
-	<property name="cargo.logging" value="high"/>
-	<property name="cactus.formatter.type" value="xml"/>
-	<property name="cactus.warfile.name" value="test"/>	
-  
-	<available file="${hadoop.root}/build/classes" type="dir" property="test.available"/>
-	<property environment="env"/>
-	<!-- check if environment has been set -->
-  <condition property="proxy.conf.dir" value="${env.HDFSPROXY_CONF_DIR}" else="${basedir}/conf">
-    <and>
-        <isset property="env.HDFSPROXY_CONF_DIR"/>
-        <available file="${env.HDFSPROXY_CONF_DIR}/hdfsproxy-default.xml"/>
-    </and>
-  </condition> 
-  
-  <condition property="startCactus">
-  	<and>
-	    <or>
-	    	<equals arg1="${testcase}" arg2="TestProxyFilter" />
-	    	<equals arg1="${testcase}" arg2="TestAuthorizationFilter" />
-	    	<equals arg1="${testcase}" arg2="TestLdapIpDirFilter" />
-				<equals arg1="${testcase}" arg2="TestProxyUtil" />
-				<equals arg1="${testcase}" arg2="TestProxyForwardServlet" />
-				<not>
-					<isset property="testcase"/>
-				</not>
-	    </or>
-    	<isset property="test.available"/>
-    </and>
-  </condition>
-  
-  <condition property="useClover">
-  	<and>
-    	<isset property="clover.home"/>
-    	<available file="${clover.home}/lib/clover.jar"/>
-    </and>
-  </condition>
-
-	<property name="ivy.settings.file" location="${hadoop.root}/ivy/ivysettings.xml"/>
-  
-  <target name="ivy-init" depends="ivy-init-antlib">
-    		<ivy:settings id="${ant.project.name}.ivy.settings"/>
-	</target>
-	
-	<!-- Define the Cactus tasks -->	
-	<target name="load-tasks" depends="ivy-retrieve-common">
-		<taskdef resource="cactus.tasks"
-			 classpathref="cactus.classpath">
-		</taskdef>		
-	</target>
-	
-
-	<target name="jar" depends="compile" description="Create jar">
-		<echo>
-            Building the .jar files.
-        </echo>
-		<jar jarfile="${build.dir}/${final.name}.jar" basedir="${build.classes}" includes="org/apache/hadoop/hdfsproxy/**/*.class" >
-                        <manifest>
-                            <section name="org/apache/hadoop/hdfsproxy">
-                                <attribute name="Implementation-Title" value="HdfsProxy"/>
-                                <attribute name="Implementation-Version" value="${hdfsproxyVersion}"/>
-                                <attribute name="Implementation-Vendor" value="Apache"/>
-                            </section>
-                        </manifest>
-		</jar>
-	</target>
-	
-	
-	<!-- ================================================================== -->
-	<!-- Make war file                                              -->
-	<!-- ================================================================== -->
-	
-	<target name="war" depends="compile" description="Create war">
-		<echo>
-			Building the .war file
-		</echo>
-	  <war destfile="${build.dir}/${final.name}.war" webxml="${basedir}/conf/tomcat-web.xml">
-	    <lib dir="${common.ivy.lib.dir}">
-	      <include name="commons-logging-${commons-logging.version}.jar"/>
-              <include name="junit-${junit.version}.jar"/>
-              <include name="log4j-${log4j.version}.jar"/>
-              <include name="slf4j-api-${slf4j-api.version}.jar"/>
-              <include name="slf4j-log4j12-${slf4j-log4j12.version}.jar"/>
-              <include name="xmlenc-${xmlenc.version}.jar"/>
-              <include name="core-${core.vesion}.jar"/> 
-	      <include name="hadoop-common-${hadoop-common.version}.jar"/>
-	    </lib>
-	    <classes dir="${proxy.conf.dir}">
-	    	<include name="hdfsproxy-default.xml"/>
-	    	<include name="user-certs.xml"/>
-	    	<include name="user-permissions.xml"/>
-	    </classes>
-	    <classes dir="${build.classes}"/>
-	    <classes dir="${hadoop.root}/build/classes"/>
-	  </war>
-	</target>
-	
-	<target name="forward" depends="compile" description="Create forward war">
-		<echo>
-			Building the forward war file
-		</echo>
-	  <war destfile="${build.dir}/${final.name}-forward.war" webxml="${basedir}/conf/tomcat-forward-web.xml">
-	    <lib dir="${common.ivy.lib.dir}">
-	      <include name="commons-logging-${commons-logging.version}.jar"/>
-        <include name="junit-${junit.version}.jar"/>
-        <include name="log4j-${log4j.version}.jar"/>
-        <include name="slf4j-api-${slf4j-api.version}.jar"/>
-        <include name="slf4j-log4j12-${slf4j-log4j12.version}.jar"/>
-        <include name="xmlenc-${xmlenc.version}.jar"/>
-        <include name="core-${core.vesion}.jar"/> 
-	    </lib>
-	    <lib dir="${hadoop.root}/lib">
-	        <include name="hadoop-common-${hadoop-common.version}.jar"/>
-	    </lib>
-	    <classes dir="${proxy.conf.dir}">
-	    	<include name="hdfsproxy-default.xml"/>
-	    	<include name="hdfsproxy-site.xml"/>
-	    	<include name="user-certs.xml"/>
-	    	<include name="user-permissions.xml"/>
-	    </classes>
-	    <classes dir="${build.classes}"/>
-	    <classes dir="${hadoop.root}/build/classes"/>
-	  </war>
-	</target>
-	
-	<target name="testwar" depends="compile" description="Create testing war">
-		<echo>
-			Building the testing .war file 
-		</echo>
-	  <war destfile="${build.dir}/${final.name}-test.war" webxml="${src.test.resources}/tomcat-web.xml">
-	    <lib dir="${common.ivy.lib.dir}">
-	      <include name="commons-logging-${commons-logging.version}.jar"/>
-              <include name="junit-${junit.version}.jar"/>
-              <include name="log4j-${log4j.version}.jar"/>
-              <include name="slf4j-api-${slf4j-api.version}.jar"/>
-              <include name="slf4j-log4j12-${slf4j-log4j12.version}.jar"/>
-              <include name="xmlenc-${xmlenc.version}.jar"/>
-              <include name="core-${core.vesion}.jar"/> 
-	      <include name="hadoop-common-${hadoop-common.version}.jar"/>
-	    </lib>
-	    <classes dir="${proxy.conf.test}" excludes="**/*.template **/*.sh"/>
-	    <classes dir="${build.classes}"/>
-	    <classes dir="${hadoop.root}/build/classes"/>
-	  </war>
-	</target>	
-	
-	<target name="cactifywar" depends="testwar,load-tasks,cactifywar-pure,cactifywar-clover" description="To include clover coverage test use -Dclover.home ..."/>
-	
-	<target name="cactifywar-pure" depends="testwar,load-tasks" unless="useClover">
-		<mkdir dir="${target.dir}" />
-		<echo> no clover found ...</echo>
-    <cactifywar srcfile="${build.dir}/${final.name}-test.war"
-        destfile="${target.dir}/${cactus.warfile.name}.war"
-        mergewebxml="${src.test.resources}/cactus-web.xml">
-      <servletredirector/>
-      <servletredirector name="ServletRedirectorSecure"
-          mapping="/ServletRedirectorSecure" roles="test"/>
-      <filterredirector mapping="/test/filterRedirector.jsp"/>      
-      <classes dir="${test.build.dir}"/>
-    </cactifywar>    	
-	</target>
-
-	<target name="cactifywar-clover" depends="testwar,load-tasks" if="useClover">
-		<mkdir dir="${target.dir}" />
-		<echo> Including clover.jar in the war file ...</echo>
-    <cactifywar srcfile="${build.dir}/${final.name}-test.war"
-        destfile="${target.dir}/${cactus.warfile.name}.war"
-        mergewebxml="${src.test.resources}/cactus-web.xml">
-      <servletredirector/>
-      <servletredirector name="ServletRedirectorSecure"
-          mapping="/ServletRedirectorSecure" roles="test"/>
-      <filterredirector mapping="/test/filterRedirector.jsp"/>
-      <classes dir="${test.build.dir}"/>
-      <lib dir="${clover.home}/lib">
-      	<include name="clover.jar"/> 
-      </lib>
-    </cactifywar>    	
-	</target>
-	
-	<target name="test" depends="compile,compile-test,test-junit,test-cactus" description="Automated Test Framework" if="test.available"/>
-	
-	<target name="test-junit" depends="compile,compile-test" if="test.available">
-		<junit fork="yes" printsummary="yes" errorProperty="tests.failed" failureProperty="tests.failed">
-        <classpath refid="test.classpath"/>
-        <sysproperty key="test.build.data" value="${build.test}/data"/>
-	      <sysproperty key="build.test" value="${build.test}"/>
-	      <sysproperty key="user.dir" value="${build.test}/data"/>
-	      <sysproperty key="fs.default.name" value="${fs.default.name}"/>
-	      <sysproperty key="hadoop.test.localoutputfile" value="${hadoop.test.localoutputfile}"/>
-	      <sysproperty key="hadoop.log.dir" value="${hadoop.log.dir}"/> 
-        <sysproperty key="test.src.dir" value="${test.src.dir}"/>
-        <sysproperty key="javax.net.ssl.trustStore" value="${ssl.keystore.proxy}"/>
-				<sysproperty key="javax.net.ssl.trustStorePassword" value="changeme"/>
-				<sysproperty key="javax.net.ssl.keyStore.proxy" value="${ssl.keystore.proxy}"/>
-				<sysproperty key="javax.net.ssl.keyStore" value="${ssl.keystore.client}"/>
-				<sysproperty key="javax.net.ssl.keyStorePassword" value="changeme"/>
-				<sysproperty key="javax.net.ssl.keyPassword" value="changeme"/>			
-				<sysproperty key="javax.net.ssl.clientCert" value="${ssl.client.cert}"/>
-        <formatter type="plain" />
-        <batchtest todir="${test.build.dir}" unless="testcase">
-           <fileset dir="${src.test}">
-             <include name="**/TestHdfsProxy.java"/>
-             <include name="**/TestProxyUgiManager.java"/>
-           </fileset>
-        </batchtest>
-        <batchtest todir="${test.build.dir}" if="testcase">
-            <fileset dir="${src.test}">
-            	<include name="**/${testcase}.java"/>
-            	<exclude name="**/TestProxyFilter.java"/>
-            	<exclude name="**/TestAuthorizationFilter.java"/>
-            	<exclude name="**/TestLdapIpDirFilter.java"/>
-            	<exclude name="**/TestProxyUtil.java"/>
-            	<exclude name="**/TestProxyForwardServlet.java"/>
-            </fileset>
-         </batchtest>
-    </junit>    	
-    <fail if="tests.failed">Tests failed!</fail>
-	</target>
-
-
-	<target name="test-cactus" depends="compile,compile-test,cactifywar" if="startCactus">
-		<exec executable="${env.JAVA_HOME}/bin/java" outputproperty="cargo.servlet.admin.port">
-	    <arg line="-cp ${build.test} org.apache.hadoop.hdfsproxy.FindFreePort -random"/>
-		</exec>
-		<exec executable="${env.JAVA_HOME}/bin/java" outputproperty="cargo.servlet.http.port">
-	    <arg line="-cp ${build.test} org.apache.hadoop.hdfsproxy.FindFreePort ${cargo.servlet.admin.port}"/>
-		</exec>
-		<exec executable="${env.JAVA_HOME}/bin/java" outputproperty="cargo.servlet.https.port">
-	    <arg line="-cp ${build.test} org.apache.hadoop.hdfsproxy.FindFreePort ${cargo.servlet.http.port}"/>
-		</exec>
-		
-		<echo> Free Ports: startup-${cargo.servlet.admin.port} / http-${cargo.servlet.http.port} / https-${cargo.servlet.https.port}</echo>
-	  <echo>Please take a deep breath while Cargo gets the Tomcat for running the servlet tests...</echo>
-	  
-	  <mkdir dir="${tomcatconfig.dir}" />
-	  <mkdir dir="${tomcatconfig.dir}/conf" />
-	  <mkdir dir="${tomcatconfig.dir}/webapps" />
-	  <mkdir dir="${tomcatconfig.dir}/temp" />
-	  <mkdir dir="${logs.dir}" />
-	  <mkdir dir="${reports.dir}" />	  
-	  <copy file="${tomcat.conf.test}/server.xml" tofile="${tomcatconfig.dir}/conf/server.xml" overwrite="true">
-		  <filterset>
-		    <filter token="ADMIN.PORT" value="${cargo.servlet.admin.port}"/>
-		    <filter token="HTTP.PORT" value="${cargo.servlet.http.port}"/>
-		    <filter token="HTTPS.PORT" value="${cargo.servlet.https.port}"/>
-		  </filterset>
-		</copy>		
-		<copy file="${tomcat.conf.test}/web.xml" tofile="${tomcatconfig.dir}/conf/web.xml"/>
-		<copy file="${tomcat.conf.test}/tomcat-users.xml" tofile="${tomcatconfig.dir}/conf/tomcat-users.xml"/>
-	
-		<cactus warfile="${target.dir}/${cactus.warfile.name}.war" fork="yes" haltonfailure="no" printsummary="yes" failureproperty="tests.failed">
-			<classpath>
-				<path refid="cactus.classpath"/>
-				<pathelement location="${build.classes}"/>
-				<pathelement location="${src.test.resources}"/>
-				<pathelement location="${src.test.resources}/proxy-config"/>
-			</classpath>			
-			<containerset>
-				<cargo containerId="${tomcat.container.id}" timeout="30000" output="${logs.dir}/output.log" log="${logs.dir}/cargo.log">
-				 <zipUrlInstaller
-            installUrl="http://archive.apache.org/dist/tomcat/tomcat-6/v6.0.24/bin/apache-tomcat-6.0.24.zip"
-            installDir="${target.dir}/${tomcat.container.id}"/>
-				  <configuration type="existing" home="${tomcatconfig.dir}">
-						<property name="cargo.servlet.port" value="${cargo.servlet.http.port}"/>
-						<property name="cargo.logging" value="${cargo.logging}"/>
-						<property name="cactus.toDir" value="${build.test}"/>
-						<deployable type="war" file="${target.dir}/${cactus.warfile.name}.war"/>
-					</configuration>
-				</cargo>
-			</containerset>
-			<sysproperty key="test.build.data" value="${build.test}/data"/>
-      <sysproperty key="build.test" value="${build.test}"/>
-      <sysproperty key="build.target" value="${target.dir}"/>
-			<sysproperty key="javax.net.ssl.trustStore" value="${ssl.keystore.proxy}"/>
-			<sysproperty key="javax.net.ssl.trustStorePassword" value="changeme"/>
-			<sysproperty key="javax.net.ssl.keyStore.proxy" value="${ssl.keystore.proxy}"/>
-			<sysproperty key="javax.net.ssl.keyStore" value="${ssl.keystore.client}"/>
-			<sysproperty key="javax.net.ssl.keyStorePassword" value="changeme"/>
-			<sysproperty key="javax.net.ssl.keyPassword" value="changeme"/>			
-			<sysproperty key="javax.net.ssl.clientCert" value="${ssl.client.cert}"/>     
-      <sysproperty key="test.proxy.conf.dir" value="${proxy.conf.test}"/>
-      <sysproperty key="test.proxy.https.port" value="${cargo.servlet.https.port}"/>
-      
-			<formatter type="${cactus.formatter.type}"/>
-			<batchtest todir="${reports.dir}" unless="testcase">
-				<fileset dir="${src.test}">
-					<include name="**/Test*.java"/>
-					<exclude name="**/TestHdfsProxy.java"/>
-					<exclude name="**/TestProxyUgiManager.java"/>
-				</fileset>
-			</batchtest>
-			<batchtest todir="${reports.dir}" if="testcase">
-        <fileset dir="${src.test}">
-        	<include name="**/${testcase}.java"/>
-        	<exclude name="**/TestHdfsProxy.java"/>
-        	<exclude name="**/TestProxyUgiManager.java"/>
-        </fileset>
-      </batchtest>
-		</cactus>		
-    <fail if="tests.failed">Tests failed!</fail>
-	</target>
-	<!-- ====================================================== -->
-	<!-- Macro definitions                                      -->
-	<!-- ====================================================== -->
-	<macrodef name="macro_tar" description="Worker Macro for tar">
-		<attribute name="param.destfile"/>
-		<element name="param.listofitems"/>
-		<sequential>
-			<tar compression="gzip" longfile="gnu"
-          destfile="@{param.destfile}">
-				<param.listofitems/>
-			</tar>
-		</sequential>
-	</macrodef>
-
-	<!-- ================================================================== -->
-	<!-- D I S T R I B U T I O N                                            -->
-	<!-- ================================================================== -->
-	<!--                                                                    -->
-	<!-- ================================================================== -->
-	<target name="local-package" depends="jar,war" description="Package in local build directory">
-		<mkdir dir="${build.dir}/${final.name}"/>
-		<mkdir dir="${build.dir}/${final.name}/logs"/>
-		<copy todir="${build.dir}/${final.name}" includeEmptyDirs="false">
-			<fileset dir="${build.dir}">
-				<include name="*.jar" />
-				<include name="*.war" />
-			</fileset>
-		</copy>
-		<copy todir="${build.dir}/${final.name}/lib" includeEmptyDirs="false">
-			<fileset dir="${common.ivy.lib.dir}">
-        <include name="commons-logging-${commons-logging.version}.jar"/>
-        <include name="commons-logging-api-${commons-logging-api.version}.jar"/>
-        <include name="junit-${junit.version}.jar"/>
-        <include name="log4j-${log4j.version}.jar"/>
-        <include name="slf4j-api-${slf4j-api.version}.jar"/>
-        <include name="slf4j-log4j12-${slf4j-log4j12.version}.jar"/>
-        <include name="xmlenc-${xmlenc.version}.jar"/>
-        <include name="jetty-util-${jetty-util.version}.jar"/>
-        <include name="jetty-${jetty.version}.jar"/>
-        <include name="servlet-api-2.5-${servlet-api-2.5.version}.jar"/>
-        <include name="core-${core.vesion}.jar"/> 
-                     <!--  </fileset>
-		       <fileset dir="${hadoop.root}/lib/jsp-${jsp.version}"> -->
-        <include name="jsp-${jsp.version}-${jetty.version}.jar"/> 
-        <include name="jsp-api-${jsp.version}-${jetty.version}.jar"/> 
-			</fileset>
-		</copy>
-
-		<copy todir="${build.dir}/${final.name}/lib" includeEmptyDirs="false">
-                  	<fileset dir="${hadoop.root}/build">
-                          	<include name="*-core.jar"/>
-                          	<include name="*-tools.jar"/>
-			</fileset>
-		</copy>
-
-		<copy todir="${build.dir}/${final.name}/bin">
-			<fileset dir="${bin.dir}"/>
-		</copy>
-	
-
-		<copy todir="${build.dir}/${final.name}/conf">
-			<fileset dir="${proxy.conf.dir}"/>
-		</copy>
-		
-
-		<copy todir="${build.dir}/${final.name}">
-			<fileset dir="${basedir}">
-				<include name="README" />
-				<include name="build.xml" />
-				<include name="*.txt" />
-			</fileset>
-		</copy>
-
-		<copy todir="${build.dir}/${final.name}/src" includeEmptyDirs="true">
-			<fileset dir="${src.dir}" excludes="**/*.template **/docs/build/**/*"/>
-		</copy>
-
-		<chmod perm="ugo+x" type="file" parallel="false">
-			<fileset dir="${build.dir}/${final.name}/bin"/>
-		</chmod>
-
-	</target>
-	<target name="package" depends="local-package" description="Build distribution">
-    <mkdir dir="${dist.dir}/contrib/${name}"/>
-    <copy todir="${dist.dir}/contrib/${name}">
-      <fileset dir="${build.dir}/${final.name}">
-      	<exclude name="**/lib/**" />
-      	<exclude name="**/src/**" />
-      	<exclude name="*.war" />
-      </fileset>
-    </copy>
-    <chmod dir="${dist.dir}/contrib/${name}/bin" perm="a+x" includes="*"/>
-	</target>
-
-	<!-- ================================================================== -->
-	<!-- Make release tarball                                               -->
-	<!-- ================================================================== -->
-	<target name="tar" depends="local-package,war" description="Make release tarball">
-		<macro_tar param.destfile="${build.dir}/${final.name}.tar.gz">
-			<param.listofitems>
-				<tarfileset dir="${build.dir}" mode="664">
-					<exclude name="${final.name}/bin/*" />
-					<include name="${final.name}/**" />
-				</tarfileset>
-				<tarfileset dir="${build.dir}" mode="755">
-					<include name="${final.name}/bin/*" />
-				</tarfileset>
-			</param.listofitems>
-		</macro_tar>
-	</target>
-
-	<target name="binary" depends="local-package,war" description="Make tarball without source and documentation">
-		<macro_tar param.destfile="${build.dir}/${final.name}-bin.tar.gz">
-			<param.listofitems>
-				<tarfileset dir="${build.dir}" mode="664">
-					<exclude name="${final.name}/bin/*" />
-					<exclude name="${final.name}/src/**" />
-					<exclude name="${final.name}/docs/**" />
-					<include name="${final.name}/**" />
-				</tarfileset>
-				<tarfileset dir="${build.dir}" mode="755">
-					<include name="${final.name}/bin/*" />
-				</tarfileset>
-			</param.listofitems>
-		</macro_tar>
-	</target>
-
-
-	 <!-- the unit test classpath -->
-  <path id="test.classpath">
-  	<pathelement location="${proxy.conf.test}" />
-    <pathelement location="${test.build.dir}" />
-    <pathelement location="${hadoop.root}/build/test/hdfs/classes"/>
-    <!--<pathelement location="${hadoop.root}/src/contrib/test"/>-->
-    <pathelement location="${hadoop.root}/conf"/>
-    <pathelement location="${hadoop.root}/build"/>
-    <pathelement location="${hadoop.root}/build/classes"/>
-    <pathelement location="${hadoop.root}/build/tools"/>
-    <pathelement location="${build.examples}"/>
-    <pathelement path="${clover.jar}"/>
-    <path refid="contrib-classpath"/>
-  </path>
-  
-  <path id="cactus.classpath">
-    <path refid="test.classpath"/>
-  </path>
-
-</project>
diff --git a/hdfs/src/contrib/hdfsproxy/conf/configuration.xsl b/hdfs/src/contrib/hdfsproxy/conf/configuration.xsl
deleted file mode 100644
index 377cdbe..0000000
--- a/hdfs/src/contrib/hdfsproxy/conf/configuration.xsl
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-<tr>
-  <td><a name="{name}"><xsl:value-of select="name"/></a></td>
-  <td><xsl:value-of select="value"/></td>
-  <td><xsl:value-of select="description"/></td>
-</tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>
diff --git a/hdfs/src/contrib/hdfsproxy/conf/hdfsproxy-default.xml b/hdfs/src/contrib/hdfsproxy/conf/hdfsproxy-default.xml
deleted file mode 100644
index 3215246..0000000
--- a/hdfs/src/contrib/hdfsproxy/conf/hdfsproxy-default.xml
+++ /dev/null
@@ -1,128 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put hdfsproxy specific properties in this file. -->
-
-<configuration>
-
-<property>
-  <name>hdfsproxy.https.address</name>
-  <value>0.0.0.0:8443</value>
-  <description>the SSL port that hdfsproxy listens on
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.hosts</name>
-  <value>hdfsproxy-hosts</value>
-  <description>location of hdfsproxy-hosts file
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.dfs.namenode.address</name>
-  <value>localhost:54321</value>
-  <description>namenode address of the HDFS cluster being proxied
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.https.server.keystore.resource</name>
-  <value>ssl-server.xml</value>
-  <description>location of the resource from which ssl server keystore
-  information will be extracted
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.user.permissions.file.location</name>
-  <value>user-permissions.xml</value>
-  <description>location of the user permissions file
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.user.certs.file.location</name>
-  <value>user-certs.xml</value>
-  <description>location of the user certs file
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.ugi.cache.ugi.lifetime</name>
-  <value>15</value>
-  <description> The lifetime (in minutes) of a cached ugi
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.ldap.initial.context.factory</name>
-  <value>com.sun.jndi.ldap.LdapCtxFactory</value>
-  <description> ldap initial context factory
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.ldap.provider.url</name>
-  <value>ldap://localhost:389</value>
-  <description> ldap server address
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.ldap.role.base</name>
-  <value>ou=proxyroles,dc=mycompany,dc=com</value>
-  <description> ldap role base
-  </description>
-</property>
-
-<property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:54321</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-<property>
-  <name>dfs.blocksize</name>
-  <value>134217728</value>
-  <description>The default block size for new files.</description>
-</property>
-
-<property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-</property>
-
-    <property>
-        <name>hdfsproxy.kerberos.principal</name>
-        <value>user@REALM</value>
-        <description> kerberos principal to be used by hdfsproxy </description>
-    </property>
-
-    <property>
-        <name>hdfsproxy.kerberos.keytab</name>
-        <value>proxy.prod.headless.keytab</value>
-        <description> kerberos keytab to be used by hdfsproxy </description>
-    </property>
-
-    <property>
-        <name>hdfsproxy.kerberos.default.realm</name>
-        <value>/instance@REALM</value>
-        <description> kerberos default realm appended to non-qualified userIds </description>
-    </property>
-
-    <property>
-        <name>dfs.namenode.kerberos.principal</name>
-        <value>hdfs@REALM</value>
-        <description> Namenode user name key.  </description>
-    </property>
-
-</configuration>
-
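For orientation while reading this removal, a minimal sketch of how the deleted hdfsproxy code consumed the hdfsproxy-default.xml properties above, assuming only hadoop-common's Configuration is on the classpath; the class name and main method here are illustrative and not part of the removed sources:

    import org.apache.hadoop.conf.Configuration;

    public class HdfsProxyConfSketch {
      public static void main(String[] args) {
        // Start with an empty Configuration and pull in the proxy resources
        // from the classpath; site settings override the defaults file.
        Configuration conf = new Configuration(false);
        conf.addResource("hdfsproxy-default.xml");
        conf.addResource("hdfsproxy-site.xml");

        // Property names below come from the deleted hdfsproxy-default.xml.
        String sslAddr = conf.get("hdfsproxy.https.address", "0.0.0.0:8443");
        String namenode = conf.get("hdfsproxy.dfs.namenode.address");
        System.out.println("proxy SSL address: " + sslAddr
            + ", proxied namenode: " + namenode);
      }
    }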
diff --git a/hdfs/src/contrib/hdfsproxy/conf/hdfsproxy-env.sh b/hdfs/src/contrib/hdfsproxy/conf/hdfsproxy-env.sh
deleted file mode 100644
index a0ff7a5..0000000
--- a/hdfs/src/contrib/hdfsproxy/conf/hdfsproxy-env.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-# Set HdfsProxy-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
-
-# Extra Java CLASSPATH elements.  Optional.
-# export HDFSPROXY_CLASSPATH=
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HDFSPROXY_HEAPSIZE=2000
-
-# Extra Java runtime options.  Empty by default.
-# export HDFSPROXY_OPTS=
-
-# Extra ssh options.  Empty by default.
-# export HDFSPROXY_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HDFSPROXY_CONF_DIR"
-
-# Where log files are stored.  $HDFSPROXY_HOME/logs by default.
-# export HDFSPROXY_LOG_DIR=${HDFSPROXY_HOME}/logs
-
-# File naming remote slave hosts.  $HDFSPROXY_HOME/conf/slaves by default.
-# export HDFSPROXY_SLAVES=${HDFSPROXY_HOME}/conf/slaves
-
-# host:path where hdfsproxy code should be rsync'd from.  Unset by default.
-# export HDFSPROXY_MASTER=master:/home/$USER/src/hdfsproxy
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HDFSPROXY_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-# export HDFSPROXY_PID_DIR=/var/hdfsproxy/pids
-
-# A string representing this instance of hdfsproxy. $USER by default.
-# export HDFSPROXY_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-# export HDFSPROXY_NICENESS=10
diff --git a/hdfs/src/contrib/hdfsproxy/conf/hdfsproxy-env.sh.template b/hdfs/src/contrib/hdfsproxy/conf/hdfsproxy-env.sh.template
deleted file mode 100644
index a0ff7a5..0000000
--- a/hdfs/src/contrib/hdfsproxy/conf/hdfsproxy-env.sh.template
+++ /dev/null
@@ -1,44 +0,0 @@
-# Set HdfsProxy-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
-
-# Extra Java CLASSPATH elements.  Optional.
-# export HDFSPROXY_CLASSPATH=
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HDFSPROXY_HEAPSIZE=2000
-
-# Extra Java runtime options.  Empty by default.
-# export HDFSPROXY_OPTS=
-
-# Extra ssh options.  Empty by default.
-# export HDFSPROXY_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HDFSPROXY_CONF_DIR"
-
-# Where log files are stored.  $HDFSPROXY_HOME/logs by default.
-# export HDFSPROXY_LOG_DIR=${HDFSPROXY_HOME}/logs
-
-# File naming remote slave hosts.  $HDFSPROXY_HOME/conf/slaves by default.
-# export HDFSPROXY_SLAVES=${HDFSPROXY_HOME}/conf/slaves
-
-# host:path where hdfsproxy code should be rsync'd from.  Unset by default.
-# export HDFSPROXY_MASTER=master:/home/$USER/src/hdfsproxy
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HDFSPROXY_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-# export HDFSPROXY_PID_DIR=/var/hdfsproxy/pids
-
-# A string representing this instance of hdfsproxy. $USER by default.
-# export HDFSPROXY_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-# export HDFSPROXY_NICENESS=10
diff --git a/hdfs/src/contrib/hdfsproxy/conf/hdfsproxy-hosts b/hdfs/src/contrib/hdfsproxy/conf/hdfsproxy-hosts
deleted file mode 100644
index 2fbb50c..0000000
--- a/hdfs/src/contrib/hdfsproxy/conf/hdfsproxy-hosts
+++ /dev/null
@@ -1 +0,0 @@
-localhost
diff --git a/hdfs/src/contrib/hdfsproxy/conf/log4j.properties b/hdfs/src/contrib/hdfsproxy/conf/log4j.properties
deleted file mode 100644
index 2520ab3..0000000
--- a/hdfs/src/contrib/hdfsproxy/conf/log4j.properties
+++ /dev/null
@@ -1,61 +0,0 @@
-# Define some default values that can be overridden by system properties
-hdfsproxy.root.logger=INFO,console
-hdfsproxy.log.dir=.
-hdfsproxy.log.file=hdfsproxy.log
-
-# Define the root logger to the system property "hdfsproxy.root.logger".
-log4j.rootLogger=${hdfsproxy.root.logger}
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hdfsproxy.log.dir}/${hdfsproxy.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hdfsproxy.log.dir}/${hdfsproxy.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-# Custom Logging levels
-
-#log4j.logger.org.apache.hadoop.hdfsproxy.HttpsProxy=DEBUG
-#log4j.logger.org.apache.hadoop.hdfsproxy.ProxyFilter=DEBUG
-
diff --git a/hdfs/src/contrib/hdfsproxy/conf/ssl-server.xml b/hdfs/src/contrib/hdfsproxy/conf/ssl-server.xml
deleted file mode 100644
index 641da1a..0000000
--- a/hdfs/src/contrib/hdfsproxy/conf/ssl-server.xml
+++ /dev/null
@@ -1,48 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-
-<property>
-  <name>ssl.server.truststore.location</name>
-  <value>${javax.net.ssl.keyStore.proxy}</value>
-</property>
-
-<property>
-  <name>ssl.server.truststore.password</name>
-  <value>changeme</value>
-</property>
-
-<property>
-  <name>ssl.server.keystore.location</name>
-  <value>${javax.net.ssl.keyStore.proxy}</value>
-</property>
-
-<property>
-  <name>ssl.server.keystore.password</name>
-  <value>changeme</value>
-</property>
-
-<property>
-  <name>ssl.server.keystore.keypassword</name>
-  <value>changeme</value>
-</property>
-
-</configuration>
diff --git a/hdfs/src/contrib/hdfsproxy/conf/tomcat-forward-web.xml b/hdfs/src/contrib/hdfsproxy/conf/tomcat-forward-web.xml
deleted file mode 100644
index cf30667..0000000
--- a/hdfs/src/contrib/hdfsproxy/conf/tomcat-forward-web.xml
+++ /dev/null
@@ -1,109 +0,0 @@
-<?xml version="1.0" encoding="ISO-8859-1"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!DOCTYPE web-app 
-    PUBLIC "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN" 
-    "http://java.sun.com/dtd/web-app_2_3.dtd">
-
-<web-app>
-
-
-    <!-- General description of your web application -->
-
-    <display-name>HDFS Proxy</display-name>
-    <description>
-      get data from grid forward war
-    </description>
-
-    <context-param>
-      <param-name>webmaster</param-name>
-      <param-value>zhiyong1@yahoo-inc.com</param-value>
-      <description>
-        The EMAIL address of the administrator to whom questions
-        and comments about this application should be addressed.
-      </description>
-    </context-param>
-          
-    <filter>
-	   	<filter-name>ldapIpDirFilter</filter-name>
-	   	<filter-class>org.apache.hadoop.hdfsproxy.LdapIpDirFilter</filter-class>
-		</filter>
-
-		<filter-mapping>
-        <filter-name>ldapIpDirFilter</filter-name>
-				<url-pattern>/*</url-pattern>
-    </filter-mapping>
-
-
-
-    
-    <servlet>
-    	<servlet-name>proxyForward</servlet-name>
-      <description>forward data access to specific servlets</description>
-      <servlet-class>org.apache.hadoop.hdfsproxy.ProxyForwardServlet</servlet-class>
-    </servlet>
-    
-    <servlet-mapping>
-        <servlet-name>proxyForward</servlet-name>
-        <url-pattern>/listPaths/*</url-pattern>
-    </servlet-mapping>
-	  <servlet-mapping>
-        <servlet-name>proxyForward</servlet-name>
-        <url-pattern>/data/*</url-pattern>
-    </servlet-mapping>
-    <servlet-mapping>
-        <servlet-name>proxyForward</servlet-name>
-        <url-pattern>/streamFile/*</url-pattern>
-    </servlet-mapping>
-    
-    <servlet>
-    	<servlet-name>fileForward</servlet-name>
-      <description>forward file data access to streamFile</description>
-      <servlet-class>org.apache.hadoop.hdfsproxy.ProxyFileForward</servlet-class>
-    </servlet>
-    
-    <servlet-mapping>
-        <servlet-name>fileForward</servlet-name>
-        <url-pattern>/file/*</url-pattern>
-    </servlet-mapping>
-    
-    
-
-		<welcome-file-list>
-		  <welcome-file>index.html</welcome-file>
-		</welcome-file-list>
-
-    <!-- Define the default session timeout for your application,
-         in minutes.  From a servlet or JSP page, you can modify
-         the timeout for a particular session dynamically by using
-         HttpSession.setMaxInactiveInterval(). -->
-
-    <session-config>
-      <session-timeout>30</session-timeout>    <!-- 30 minutes -->
-    </session-config>    
-
-
-</web-app>
-
-
-
-
-
-
-
-
diff --git a/hdfs/src/contrib/hdfsproxy/conf/tomcat-web.xml b/hdfs/src/contrib/hdfsproxy/conf/tomcat-web.xml
deleted file mode 100644
index 74f8af5..0000000
--- a/hdfs/src/contrib/hdfsproxy/conf/tomcat-web.xml
+++ /dev/null
@@ -1,166 +0,0 @@
-<?xml version="1.0" encoding="ISO-8859-1"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!DOCTYPE web-app
-    PUBLIC "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"
-    "http://java.sun.com/dtd/web-app_2_3.dtd">
-
-<web-app>
-
-
-    <!-- General description of your web application -->
-
-    <display-name>HDFS Proxy</display-name>
-    <description>
-      get data from grid
-    </description>
-
-
-    <!-- Context initialization parameters that define shared
-         String constants used within your application, which
-         can be customized by the system administrator who is
-         installing your application.  The values actually
-         assigned to these parameters can be retrieved in a
-         servlet or JSP page by calling:
-
-             String value =
-               getServletContext().getInitParameter("name");
-
-         where "name" matches the <param-name> element of
-         one of these initialization parameters.
-
-         You can define any number of context initialization
-         parameters, including zero.
-    -->
-
-    <context-param>
-      <param-name>webmaster</param-name>
-      <param-value>zhiyong1@yahoo-inc.com</param-value>
-      <description>
-        The EMAIL address of the administrator to whom questions
-        and comments about this application should be addressed.
-      </description>
-    </context-param>
-
-    <filter>
-	   	<filter-name>ldapIpDirFilter</filter-name>
-	   	<filter-class>org.apache.hadoop.hdfsproxy.LdapIpDirFilter</filter-class>
-		</filter>
-
-    <filter>
-        <filter-name>authorizationFilter</filter-name>
-        <filter-class>org.apache.hadoop.hdfsproxy.KerberosAuthorizationFilter</filter-class>
-    </filter>
-
-    <filter-mapping>
-        <filter-name>ldapIpDirFilter</filter-name>
-        <url-pattern>/*</url-pattern>
-        <dispatcher>REQUEST</dispatcher>
-        <dispatcher>FORWARD</dispatcher>
-    </filter-mapping>
-
-    <filter-mapping>
-        <filter-name>authorizationFilter</filter-name>
-        <url-pattern>/*</url-pattern>
-        <dispatcher>REQUEST</dispatcher>
-        <dispatcher>FORWARD</dispatcher>
-    </filter-mapping>
-
-    <!-- Servlet definitions for the servlets that make up
-         your web application, including initialization
-         parameters.  With Tomcat, you can also send requests
-         to servlets not listed here with a request like this:
-
-           http://localhost:8080/{context-path}/servlet/{classname}
-
-         but this usage is not guaranteed to be portable.  It also
-         makes relative references to images and other resources
-         required by your servlet more complicated, so defining
-         all of your servlets (and defining a mapping to them with
-         a servlet-mapping element) is recommended.
-
-         Servlet initialization parameters can be retrieved in a
-         servlet or JSP page by calling:
-
-             String value =
-               getServletConfig().getInitParameter("name");
-
-         where "name" matches the <param-name> element of
-         one of these initialization parameters.
-
-         You can define any number of servlets, including zero.
-    -->
-
-
-    <servlet>
-    	<servlet-name>listPaths</servlet-name>
-      <description>list paths data access</description>
-      <servlet-class>org.apache.hadoop.hdfsproxy.ProxyListPathsServlet</servlet-class>
-    </servlet>
-
-    <servlet-mapping>
-        <servlet-name>listPaths</servlet-name>
-        <url-pattern>/listPaths/*</url-pattern>
-    </servlet-mapping>
-
-		<servlet>
-    	<servlet-name>data</servlet-name>
-      <description>data access</description>
-      <servlet-class>org.apache.hadoop.hdfsproxy.ProxyFileDataServlet</servlet-class>
-    </servlet>
-
-	  <servlet-mapping>
-        <servlet-name>data</servlet-name>
-        <url-pattern>/data/*</url-pattern>
-    </servlet-mapping>
-
-    <servlet>
-    	<servlet-name>streamFile</servlet-name>
-      <description>stream file access</description>
-      <servlet-class>org.apache.hadoop.hdfsproxy.ProxyStreamFile</servlet-class>
-    </servlet>
-
-    <servlet-mapping>
-        <servlet-name>streamFile</servlet-name>
-        <url-pattern>/streamFile/*</url-pattern>
-    </servlet-mapping>
-
-
-		<welcome-file-list>
-		  <welcome-file>index.html</welcome-file>
-		</welcome-file-list>
-
-    <!-- Define the default session timeout for your application,
-         in minutes.  From a servlet or JSP page, you can modify
-         the timeout for a particular session dynamically by using
-         HttpSession.setMaxInactiveInterval(). -->
-
-    <session-config>
-      <session-timeout>30</session-timeout>    <!-- 30 minutes -->
-    </session-config>
-
-
-</web-app>
-
-
-
-
-
-
-
-
diff --git a/hdfs/src/contrib/hdfsproxy/conf/user-certs.xml b/hdfs/src/contrib/hdfsproxy/conf/user-certs.xml
deleted file mode 100644
index 3e3c08f..0000000
--- a/hdfs/src/contrib/hdfsproxy/conf/user-certs.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- 
-
-This file defines the mappings from a username to a comma-separated list
-of certificate serial numbers that the user is allowed to use. One mapping
-per user. Wildcard characters, such as "*" and "?", are not recognized. 
-Any leading or trailing whitespaces are stripped/ignored.
-
--->
-
-<configuration>
-<property>
-  <name> nobody </name>
-  <value> ,6  ,,  3 , 9a2cf0be9ddf8280
-
-
-
-         </value>
-</property>
-
-<property>
-  <name> Admin </name>
-  <value>, 6,  ,,  3 , 9a2cf0be9ddf8280
-
-
-
-         </value>
-</property>
-
-</configuration>
diff --git a/hdfs/src/contrib/hdfsproxy/conf/user-permissions.xml b/hdfs/src/contrib/hdfsproxy/conf/user-permissions.xml
deleted file mode 100644
index 819c1a6..0000000
--- a/hdfs/src/contrib/hdfsproxy/conf/user-permissions.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- 
-
-This file defines the mappings from a user name to a comma-separated list
-of directories/files that the user is allowed to access. One mapping
-per user. Wildcard characters, such as "*" and "?", are not recognized. 
-For example, to match "/output" directory, one can use "/output" or 
-"/output/", but not "/output/*". Note that any leading or trailing
-whitespaces are stripped/ignored for the name field.
-
--->
-
-<configuration>
-<property>
-  <name> nobody </name>
-  <value> ,
-
-
-
-        /input, /user, /data </value>
-</property>
-
-
-</configuration>
diff --git a/hdfs/src/contrib/hdfsproxy/ivy.xml b/hdfs/src/contrib/hdfsproxy/ivy.xml
deleted file mode 100644
index e32783b..0000000
--- a/hdfs/src/contrib/hdfsproxy/ivy.xml
+++ /dev/null
@@ -1,127 +0,0 @@
-<?xml version="1.0" ?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<ivy-module version="1.0">
-  <info organisation="org.apache.hadoop" module="${ant.project.name}">
-    <license name="Apache 2.0"/>
-    <ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
-    <description>
-        Apache Hadoop contrib
-    </description>
-  </info>
-  <configurations defaultconfmapping="default">
-    <!--these match the Maven configurations-->
-    <conf name="default" extends="master,runtime"/>
-    <conf name="master" description="contains the artifact but no dependencies"/>
-    <conf name="runtime" description="runtime but not the artifact" />
-
-    <conf name="common" visibility="private" 
-      description="artifacts needed to compile/test the application"/>
-  </configurations>
-
-  <publications>
-    <!--get the artifact from our module name-->
-    <artifact conf="master"/>
-  </publications>
-  <dependencies>
-    <dependency org="org.apache.hadoop"
-      name="hadoop-common"
-      rev="${hadoop-common.version}"
-      conf="common->default"/>
-    <dependency org="org.apache.hadoop"
-      name="hadoop-common-test"
-      rev="${hadoop-common.version}"
-      conf="common->default"/>
-    <dependency org="commons-cli"
-      name="commons-cli"
-      rev="${commons-cli.version}"
-      conf="common->default"/>
-    <dependency org="log4j"
-      name="log4j"
-      rev="${log4j.version}"
-      conf="common->master"/>
-    <dependency org="commons-logging"
-      name="commons-logging"
-      rev="${commons-logging.version}"
-      conf="common->master"/>
-    <dependency org="commons-logging"
-      name="commons-logging-api"
-      rev="${commons-logging-api.version}"
-      conf="common->master"/>
-    <dependency org="junit"
-      name="junit"
-      rev="${junit.version}"
-      conf="common->master"/>
-    <dependency org="org.slf4j"
-      name="slf4j-api"
-      rev="${slf4j-api.version}"
-      conf="common->master"/>
-    <dependency org="org.slf4j"
-      name="slf4j-log4j12"
-      rev="${slf4j-log4j12.version}"
-      conf="common->master"/>
-    <dependency org="xmlenc"
-      name="xmlenc"
-      rev="${xmlenc.version}"
-      conf="common->master"/>
-    <dependency org="org.mortbay.jetty"
-      name="jetty"
-      rev="${jetty.version}"
-      conf="common->master"/>
-    <dependency org="org.mortbay.jetty"
-      name="jsp-api-2.1"
-      rev="${jetty.version}"
-      conf="common->master"/>
-    <dependency org="org.mortbay.jetty"
-      name="jsp-2.1"
-      rev="${jetty.version}"
-      conf="common->master"/>
-    <dependency org="org.mortbay.jetty"
-      name="jetty-util"
-      rev="${jetty-util.version}"
-      conf="common->master"/>
-    <dependency org="org.mortbay.jetty"
-      name="servlet-api-2.5"
-      rev="${servlet-api-2.5.version}"
-      conf="common->master"/>
-    <dependency org="org.eclipse.jdt"
-      name="core"
-      rev="${core.version}"
-      conf="common->master"/>
-    <dependency org="org.apache.cactus" name="cactus.core.framework.uberjar.javaEE.14" rev="${cactus.version}" conf="common->master"/>
-		<dependency org="org.apache.cactus" name="cactus.integration.ant" rev="${cactus.version}" conf="common->master"/>
-		<dependency org="org.apache.cactus" name="cactus.integration.shared.api" rev="${cactus.version}" conf="common->master"/>
-				
-		<dependency org="commons-httpclient" name="commons-httpclient" rev="3.1" conf="common->master"/>
-		<dependency org="commons-io" name="commons-io" rev="1.4" conf="common->master"/>
-		<dependency org="commons-lang" name="commons-lang" rev="2.3" conf="common->master"/>
-		<dependency org="commons-codec" name="commons-codec" rev="1.4" conf="common->master"/>
-		
-		<dependency org="aspectj" name="aspectjrt" rev="1.5.3" conf="common->master"/>
-		
-		<dependency org="org.codehaus.cargo" name="cargo-core-uberjar" rev="0.9" conf="common->master"/>
-		<dependency org="org.codehaus.cargo" name="cargo-ant" rev="0.9" conf="common->master"/>
-
-		<dependency org="javax.servlet" name="jsp-api" rev="2.0" conf="common->master"/>
-		<dependency org="javax.servlet" name="servlet-api" rev="2.5" conf="common->master"/>
-		<dependency org="javax.servlet" name="jstl" rev="1.1.2" conf="common->master"/>
-		<dependency org="taglibs" name="standard" rev="1.1.2" conf="common->master"/>
-	  
-		<dependency org="junitperf" name="junitperf" rev="1.8" conf="common->master"/>
-  </dependencies>
-</ivy-module>
diff --git a/hdfs/src/contrib/hdfsproxy/ivy/libraries.properties b/hdfs/src/contrib/hdfsproxy/ivy/libraries.properties
deleted file mode 100644
index 6b2e55f..0000000
--- a/hdfs/src/contrib/hdfsproxy/ivy/libraries.properties
+++ /dev/null
@@ -1,18 +0,0 @@
-#This properties file lists the versions of the various artifacts used by hadoop.
-#It drives ivy and the generation of a maven POM
-#These are the versions of our dependencies (in alphabetical order)
-ivy.version=2.1.0
-
-log4j.version=1.2.15
-slf4j-api.version=1.5.11
-slf4j-log4j12.version=1.5.11
-jetty.version=6.1.14
-jetty-util.version=6.1.14
-servlet-api-2.5.version=6.1.14
-cactus.version=1.8.0
-commons-logging.version=1.1.1
-commons-logging-api.version=1.1
-junit.version=4.8.1
-jsp.version=2.1
-core.version=3.1.1
-xmlenc.version=0.52
\ No newline at end of file
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/AuthorizationFilter.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/AuthorizationFilter.java
deleted file mode 100644
index 2a32320..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/AuthorizationFilter.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfsproxy;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.conf.Configuration;
-
-import javax.servlet.*;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.IOException;
-import java.util.List;
-import java.util.Arrays;
-import java.util.regex.Pattern;
-import java.util.regex.Matcher;
-
-public class AuthorizationFilter implements Filter {
-  public static final Log LOG = LogFactory.getLog(AuthorizationFilter.class);
-
-  private static final Pattern HDFS_PATH_PATTERN = Pattern
-      .compile("(^hdfs://([\\w\\-]+(\\.)?)+:\\d+|^hdfs://([\\w\\-]+(\\.)?)+)");
-
-  /** Pattern for a filter to find out if a request is HFTP/HSFTP request */
-  protected static final Pattern HFTP_PATTERN = Pattern
-      .compile("^(/listPaths|/data|/streamFile|/file)$");
-
-  protected String namenode;
-  
-  /** {@inheritDoc} **/
-  public void init(FilterConfig filterConfig) throws ServletException {
-    Configuration conf = new Configuration(false);
-    conf.addResource("hdfsproxy-default.xml");
-    conf.addResource("hdfsproxy-site.xml");
-    namenode = conf.get("fs.default.name");
-  }
-
-  /** {@inheritDoc} **/
-  @SuppressWarnings("unchecked")
-  public void doFilter(ServletRequest request,
-                       ServletResponse response,
-                       FilterChain chain)
-      throws IOException, ServletException {
-
-    HttpServletResponse rsp = (HttpServletResponse) response;
-    HttpServletRequest rqst = (HttpServletRequest) request;
-
-    String userId = getUserId(request);
-    String groups = getGroups(request);
-    List<Path> allowedPaths = getAllowedPaths(request);
-
-    UserGroupInformation ugi =
-        UserGroupInformation.createRemoteUser(userId);
-
-    String filePath = getPathFromRequest(rqst);
-
-    if (filePath == null || !checkHdfsPath(filePath, allowedPaths)) {
-      String msg = "User " + userId + " (" + groups
-          + ") is not authorized to access path " + filePath;
-      LOG.warn(msg);
-      rsp.sendError(HttpServletResponse.SC_FORBIDDEN, msg);
-      return;
-    }
-    request.setAttribute("authorized.ugi", ugi);
-
-    LOG.info("User: " + userId + "(" + groups +
-        ") Request: " + rqst.getPathInfo() + " From: " +
-        rqst.getRemoteAddr());
-
-    chain.doFilter(request, response);
-  }
-
-  protected String getUserId(ServletRequest rqst) {
-    String userId = (String) rqst.
-        getAttribute("org.apache.hadoop.hdfsproxy.authorized.userID");
-    if (userId != null)
-      userId = userId.split("[/@]")[0];
-    return userId;
-  }
-
-  protected String getGroups(ServletRequest request) {
-    UserGroupInformation ugi = UserGroupInformation.
-        createRemoteUser(getUserId(request));
-    return Arrays.toString(ugi.getGroupNames());
-  }
-
-  @SuppressWarnings("unchecked")
-  protected List<Path> getAllowedPaths(ServletRequest request) {
-    return (List<Path>)request.
-        getAttribute("org.apache.hadoop.hdfsproxy.authorized.paths");
-  }
-
-  protected String getPathFromRequest(HttpServletRequest rqst) {
-    String filePath = null;
-    // check request path
-    String servletPath = rqst.getServletPath();
-    if (HFTP_PATTERN.matcher(servletPath).matches()) {
-        // file path as part of the URL
-        filePath = rqst.getPathInfo() != null ? rqst.getPathInfo() : "/";
-    }
-    return filePath;
-  }
-
-  /** check that the requested path is listed in the ldap entry
-   * @param pathInfo - Path to check access
-   * @param ldapPaths - List of paths allowed access
-   * @return true if access allowed, false otherwise */
-  public boolean checkHdfsPath(String pathInfo,
-                               List<Path> ldapPaths) {
-    if (pathInfo == null || pathInfo.length() == 0) {
-      LOG.info("Can't get file path from the request");
-      return false;
-    }
-    for (Path ldapPathVar : ldapPaths) {
-      String ldapPath = ldapPathVar.toString();
-      if (isPathQualified(ldapPath) &&
-          isPathAuthroized(ldapPath)) {
-        String allowedPath = extractPath(ldapPath);
-        if (pathInfo.startsWith(allowedPath))
-          return true;
-      } else {
-        if (pathInfo.startsWith(ldapPath))
-          return true;
-      }
-    }
-    return false;
-  }
-
-  private String extractPath(String ldapPath) {
-    return HDFS_PATH_PATTERN.split(ldapPath)[1];
-  }
-
-  private boolean isPathAuthroized(String pathStr) {
-    Matcher namenodeMatcher = HDFS_PATH_PATTERN.matcher(pathStr);
-    return namenodeMatcher.find() && namenodeMatcher.group().contains(namenode);
-  }
-
-  private boolean isPathQualified(String pathStr) {
-    if (pathStr == null || pathStr.trim().isEmpty()) {
-      return false;
-    } else {
-      return HDFS_PATH_PATTERN.matcher(pathStr).find();
-    }
-  }
-
-  /** {@inheritDoc} **/
-  public void destroy() {
-  }
-}
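For context on the class removed above, a hedged, self-contained sketch of the prefix check that AuthorizationFilter.checkHdfsPath performed; the class and method names below are illustrative simplifications, not the original implementation:

    import java.util.Arrays;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class PathPrefixCheckSketch {
      // Same idea as the removed filter's HDFS_PATH_PATTERN: a fully
      // qualified hdfs://host[:port] prefix.
      private static final Pattern HDFS_PREFIX =
          Pattern.compile("^hdfs://[\\w\\-.]+(:\\d+)?");

      // Returns true when requestedPath falls under one of the allowed
      // entries. Entries qualified with hdfs://host[:port] are honored only
      // when they refer to the proxied namenode; bare entries act as plain
      // path prefixes.
      static boolean isAllowed(String requestedPath, List<String> allowedEntries,
                               String namenode) {
        if (requestedPath == null || requestedPath.isEmpty()) {
          return false;
        }
        for (String entry : allowedEntries) {
          Matcher m = HDFS_PREFIX.matcher(entry);
          if (m.find()) {
            if (m.group().contains(namenode)
                && requestedPath.startsWith(entry.substring(m.end()))) {
              return true;
            }
          } else if (requestedPath.startsWith(entry)) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) {
        List<String> allowed =
            Arrays.asList("hdfs://localhost:54321/user", "/data");
        System.out.println(isAllowed("/user/alice/f1", allowed, "localhost:54321")); // true
        System.out.println(isAllowed("/tmp/x", allowed, "localhost:54321"));         // false
      }
    }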
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java
deleted file mode 100644
index 1837adb..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/HdfsProxy.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.IOException;
-
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URL;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
-
-/**
- * An HTTPS/SSL proxy to HDFS, implementing certificate-based access control.
- */
-public class HdfsProxy {
-  public static final Log LOG = LogFactory.getLog(HdfsProxy.class);
-
-  private ProxyHttpServer server;
-  private InetSocketAddress sslAddr;
-  
-  /** Construct a proxy from the given configuration */
-  public HdfsProxy(Configuration conf) throws IOException {
-    try {
-      initialize(conf);
-    } catch (IOException e) {
-      this.stop();
-      throw e;
-    }
-  }
-  
-  private void initialize(Configuration conf) throws IOException {
-    sslAddr = getSslAddr(conf);
-    String nn = conf.get("hdfsproxy.dfs.namenode.address");
-    if (nn == null)
-      throw new IOException("HDFS NameNode address is not specified");
-    InetSocketAddress nnAddr = NetUtils.createSocketAddr(nn);
-    LOG.info("HDFS NameNode is at: " + nnAddr.getHostName() + ":" + nnAddr.getPort());
-
-    Configuration sslConf = new HdfsConfiguration(false);
-    sslConf.addResource(conf.get("hdfsproxy.https.server.keystore.resource",
-        "ssl-server.xml"));
-    // unit testing
-    sslConf.set("proxy.http.test.listener.addr",
-                conf.get("proxy.http.test.listener.addr"));
-
-    this.server = new ProxyHttpServer(sslAddr, sslConf);
-    this.server.setAttribute("proxy.https.port", server.getPort());
-    this.server.setAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY, nnAddr);
-    this.server.setAttribute(JspHelper.CURRENT_CONF, new HdfsConfiguration());
-    this.server.addGlobalFilter("ProxyFilter", ProxyFilter.class.getName(), null);
-    this.server.addServlet("listPaths", "/listPaths/*", ProxyListPathsServlet.class);
-    this.server.addServlet("data", "/data/*", ProxyFileDataServlet.class);
-    this.server.addServlet("streamFile", "/streamFile/*", ProxyStreamFile.class);
-  }
-
-  /** return the http port if any, only for testing purposes */
-  int getPort() throws IOException {
-    return server.getPort();
-  }
-  
-  /**
-   * Start the server.
-   */
-  public void start() throws IOException {
-    this.server.start();
-    LOG.info("HdfsProxy server up at: " + sslAddr.getHostName() + ":"
-        + sslAddr.getPort());
-  }
-  
-  /**
-   * Stop all server threads and wait for all to finish.
-   */
-  public void stop() {
-    try {
-      if (server != null) {
-        server.stop();
-        server.join();
-      }
-    } catch (Exception e) {
-      LOG.warn("Got exception shutting down proxy", e);
-    }
-  }
-  
-  /**
-   * Wait for service to finish.
-   * (Normally, it runs forever.)
-   */
-  public void join() {
-    try {
-      this.server.join();
-    } catch (InterruptedException ie) {
-    }
-  }
-  
-  static InetSocketAddress getSslAddr(Configuration conf) throws IOException {
-    String addr = conf.get("hdfsproxy.https.address");
-    if (addr == null)
-      throw new IOException("HdfsProxy address is not specified");
-    return NetUtils.createSocketAddr(addr);
-  }
-
- 
-
-  public static HdfsProxy createHdfsProxy(String argv[], Configuration conf)
-      throws IOException {
-    if (argv.length > 0) {
-      System.err.println("Usage: HdfsProxy");
-      return null;
-    }
-    if (conf == null) {
-      conf = new HdfsConfiguration(false);
-      conf.addResource("hdfsproxy-default.xml");
-    }
-   
-    StringUtils.startupShutdownMessage(HdfsProxy.class, argv, LOG);
-    HdfsProxy proxy = new HdfsProxy(conf);
-    proxy.start();
-    return proxy;
-  }
-
-  public static void main(String[] argv) throws Exception {
-    try {
-      HdfsProxy proxy = createHdfsProxy(argv, null);
-      if (proxy != null)
-        proxy.join();
-    } catch (Throwable e) {
-      LOG.error(StringUtils.stringifyException(e));
-      System.exit(-1);
-    }
-  }
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/KerberosAuthorizationFilter.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/KerberosAuthorizationFilter.java
deleted file mode 100644
index e34fc31..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/KerberosAuthorizationFilter.java
+++ /dev/null
@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import java.io.IOException;
-
-/**
- * This filter is required for hdfsproxies connecting to HDFS
- * with kerberos authentication. The keytab file and principal to
- * use for the proxy user are retrieved from a configuration file.
- * If the user attribute in ldap doesn't contain a kerberos realm, the
- * default realm is picked up from the configuration.
- */
-public class KerberosAuthorizationFilter
-    extends AuthorizationFilter {
-
-  private String defaultRealm;
-
-  @Override
-  public void init(FilterConfig filterConfig) throws ServletException {
-    super.init(filterConfig);
-    Configuration conf = new Configuration(false);
-    conf.addResource("hdfsproxy-default.xml");
-    conf.addResource("hdfsproxy-site.xml");
-    initializeUGI(conf);
-    initDefaultRealm(conf);
-  }
-
-  private void initializeUGI(Configuration conf) {
-    try {
-      conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
-          "kerberos");
-
-      UserGroupInformation.setConfiguration(conf);
-      UserGroupInformation.loginUserFromKeytab(
-          conf.get("hdfsproxy.kerberos.principal"),
-          conf.get("hdfsproxy.kerberos.keytab"));
-
-      LOG.info("Logged in user: " +
-          UserGroupInformation.getLoginUser().getUserName() +
-          ", Current User: " + UserGroupInformation.getCurrentUser().getUserName());
-
-    } catch (IOException e) {
-      throw new RuntimeException("Unable to initialize credentials", e);
-    }
-  }
-
-  private void initDefaultRealm(Configuration conf) {
-    defaultRealm = conf.get("hdfsproxy.kerberos.default.realm","");
-  }
-
-  @Override
-  /** If the userid does not have realm, add the default realm */
-  protected String getUserId(ServletRequest request) {
-    String userId = (String) request.
-        getAttribute("org.apache.hadoop.hdfsproxy.authorized.userID");
-    return userId +
-        (userId.indexOf('@') > 0 ? "" : defaultRealm);
-  }
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/LdapIpDirFilter.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/LdapIpDirFilter.java
deleted file mode 100644
index 3c763a4..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/LdapIpDirFilter.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Hashtable;
-import java.util.regex.Pattern;
-
-import javax.naming.NamingEnumeration;
-import javax.naming.NamingException;
-import javax.naming.directory.Attribute;
-import javax.naming.directory.Attributes;
-import javax.naming.directory.BasicAttribute;
-import javax.naming.directory.BasicAttributes;
-import javax.naming.directory.SearchResult;
-import javax.naming.ldap.InitialLdapContext;
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-
-public class LdapIpDirFilter implements Filter {
-  public static final Log LOG = LogFactory.getLog(LdapIpDirFilter.class);
-
-  private static String baseName;
-  private static String hdfsIpSchemaStr;
-  private static String hdfsIpSchemaStrPrefix;
-  private static String hdfsUidSchemaStr;
-  private static String hdfsPathSchemaStr;
-
-  private InitialLdapContext lctx;
-
-  private class LdapRoleEntry {
-    String userId;
-    ArrayList<Path> paths;
-
-    void init(String userId, ArrayList<Path> paths) {
-      this.userId = userId;
-      this.paths = paths;
-    }
-
-    boolean contains(Path path) {
-      return paths != null && paths.contains(path);
-    }
-
-    @Override
-    public String toString() {
-      return "LdapRoleEntry{" +
-          ", userId='" + userId + '\'' +
-          ", paths=" + paths +
-          '}';
-    }
-  }
-
-  public void initialize(String bName, InitialLdapContext ctx) {
-    // hook to cooperate with unit tests
-    baseName = bName;
-    hdfsIpSchemaStr = "uniqueMember";
-    hdfsIpSchemaStrPrefix = "cn=";
-    hdfsUidSchemaStr = "uid";
-    hdfsPathSchemaStr = "documentLocation";
-    lctx = ctx;
-  }
-
-  /** {@inheritDoc} */
-  public void init(FilterConfig filterConfig) throws ServletException {
-    ServletContext context = filterConfig.getServletContext();
-    Configuration conf = new HdfsConfiguration(false);
-    conf.addResource("hdfsproxy-default.xml");
-    conf.addResource("hdfsproxy-site.xml");
-    // extract namenode from source conf.
-    String nn = ProxyUtil.getNamenode(conf);
-    InetSocketAddress nAddr = NetUtils.createSocketAddr(nn);
-    context.setAttribute("name.node.address", nAddr);
-    context.setAttribute("name.conf", conf);
-
-    // for storing hostname <--> cluster mapping to decide which source cluster
-    // to forward
-    context.setAttribute("org.apache.hadoop.hdfsproxy.conf", conf);
-
-    if (lctx == null) {
-      Hashtable<String, String> env = new Hashtable<String, String>();
-      env.put(InitialLdapContext.INITIAL_CONTEXT_FACTORY, conf.get(
-          "hdfsproxy.ldap.initial.context.factory",
-          "com.sun.jndi.ldap.LdapCtxFactory"));
-      env.put(InitialLdapContext.PROVIDER_URL, conf
-          .get("hdfsproxy.ldap.provider.url"));
-
-      try {
-        lctx = new InitialLdapContext(env, null);
-      } catch (NamingException ne) {
-        throw new ServletException("NamingException in initializing ldap"
-            + ne.toString());
-      }
-
-      baseName = conf.get("hdfsproxy.ldap.role.base");
-      hdfsIpSchemaStr = conf.get("hdfsproxy.ldap.ip.schema.string",
-          "uniqueMember");
-      hdfsIpSchemaStrPrefix = conf.get(
-          "hdfsproxy.ldap.ip.schema.string.prefix", "cn=");
-      hdfsUidSchemaStr = conf.get("hdfsproxy.ldap.uid.schema.string", "uid");
-      hdfsPathSchemaStr = conf.get("hdfsproxy.ldap.hdfs.path.schema.string",
-          "documentLocation");
-    }
-    LOG.info("LdapIpDirFilter initialization successful");
-  }
-
-  /** {@inheritDoc} */
-  public void destroy() {
-  }
-
-  /** {@inheritDoc} */
-  public void doFilter(ServletRequest request, ServletResponse response,
-      FilterChain chain) throws IOException, ServletException {
-
-    String prevThreadName = Thread.currentThread().getName();
-
-    try {
-      HttpServletRequest rqst = (HttpServletRequest) request;
-      HttpServletResponse rsp = (HttpServletResponse) response;
-
-      String contextPath = rqst.getContextPath();
-      Thread.currentThread().setName(contextPath);
-
-      if (LOG.isDebugEnabled()) {
-        StringBuilder b = new StringBuilder("Request from ").append(
-            rqst.getRemoteHost()).append("/").append(rqst.getRemoteAddr())
-            .append(":").append(rqst.getRemotePort());
-        b.append("\n The Scheme is " + rqst.getScheme());
-        b.append("\n The Path Info is " + rqst.getPathInfo());
-        b.append("\n The Translated Path Info is " + rqst.getPathTranslated());
-        b.append("\n The Context Path is " + rqst.getContextPath());
-        b.append("\n The Query String is " + rqst.getQueryString());
-        b.append("\n The Request URI is " + rqst.getRequestURI());
-        b.append("\n The Request URL is " + rqst.getRequestURL());
-        b.append("\n The Servlet Path is " + rqst.getServletPath());
-        LOG.debug(b.toString());
-      }
-      LdapRoleEntry ldapent = new LdapRoleEntry();
-      // check ip address
-      String userIp = rqst.getRemoteAddr();
-      try {
-        boolean isAuthorized = getLdapRoleEntryFromUserIp(userIp, ldapent);
-        if (!isAuthorized) {
-          rsp.sendError(HttpServletResponse.SC_FORBIDDEN, "IP " + userIp
-              + " is not authorized to access");
-          return;
-        }
-      } catch (NamingException ne) {
-        throw new IOException("NamingException while searching ldap"
-            + ne.toString());
-      }
-
-      // Since we cannot pass a UGI object across contexts (they come from
-      // different classloaders in different war files), we have to use
-      // String attributes.
-      rqst.setAttribute("org.apache.hadoop.hdfsproxy.authorized.userID",
-        ldapent.userId);
-      rqst.setAttribute("org.apache.hadoop.hdfsproxy.authorized.paths",
-        ldapent.paths);
-      LOG.info("User: " + ldapent.userId + ", Request: " + rqst.getPathInfo() +
-              " From: " + rqst.getRemoteAddr());
-      chain.doFilter(request, response);
-    } finally {
-      Thread.currentThread().setName(prevThreadName);
-    }
-  }
-
-  /**
-   * Check whether the client's IP address is listed in the LDAP roles. If it
-   * is, update ldapent and return true; otherwise return false.
-   */
-  @SuppressWarnings("unchecked")
-  private boolean getLdapRoleEntryFromUserIp(String userIp,
-      LdapRoleEntry ldapent) throws NamingException {
-    String ipMember = hdfsIpSchemaStrPrefix + userIp;
-    Attributes matchAttrs = new BasicAttributes(true);
-    matchAttrs.put(new BasicAttribute(hdfsIpSchemaStr, ipMember));
-    matchAttrs.put(new BasicAttribute(hdfsUidSchemaStr));
-    matchAttrs.put(new BasicAttribute(hdfsPathSchemaStr));
-
-    String[] attrIDs = { hdfsUidSchemaStr, hdfsPathSchemaStr };
-
-    NamingEnumeration<SearchResult> results = lctx.search(baseName, matchAttrs,
-        attrIDs);
-    if (results.hasMore()) {
-      String userId = null;
-      ArrayList<Path> paths = new ArrayList<Path>();
-      SearchResult sr = results.next();
-      Attributes attrs = sr.getAttributes();
-      for (NamingEnumeration ne = attrs.getAll(); ne.hasMore();) {
-        Attribute attr = (Attribute) ne.next();
-        if (hdfsUidSchemaStr.equalsIgnoreCase(attr.getID())) {
-          userId = (String) attr.get();
-        } else if (hdfsPathSchemaStr.equalsIgnoreCase(attr.getID())) {
-          for (NamingEnumeration e = attr.getAll(); e.hasMore();) {
-            String pathStr = (String) e.next();
-            paths.add(new Path(pathStr));
-          }
-        }
-      }
-      ldapent.init(userId, paths);
-      if (LOG.isDebugEnabled()) LOG.debug(ldapent);
-      return true;
-    }
-    LOG.info("Ip address " + userIp
-        + " is not authorized to access the proxy server");
-    return false;
-  }
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
deleted file mode 100644
index c9c8abd..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileDataServlet.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import javax.servlet.http.HttpServletRequest;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.hdfs.server.namenode.FileDataServlet;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
-import org.apache.hadoop.security.UserGroupInformation;
-
-/** {@inheritDoc} */
-public class ProxyFileDataServlet extends FileDataServlet {
-  /** For java.io.Serializable */
-  private static final long serialVersionUID = 1L;
-
-  /** {@inheritDoc} */
-  @Override
-  protected URI createUri(String parent, HdfsFileStatus i, UserGroupInformation ugi,
-      ClientProtocol nnproxy, HttpServletRequest request, String dt) throws IOException,
-      URISyntaxException {
-    String dtParam = "";
-    if (dt != null) {
-      dtParam = JspHelper.getDelegationTokenUrlParam(dt);
-    }
-    InetSocketAddress nnAddress = (InetSocketAddress) getServletContext()
-        .getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
-    String nnHostPort = nnAddress == null ? null : NameNode
-        .getHostPortString(nnAddress);
-    String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS,
-        nnHostPort);
-    return new URI(request.getScheme(), null, request.getServerName(), request
-        .getServerPort(), "/streamFile" + i.getFullName(parent),
-        "&ugi=" + ugi.getShortUserName() + dtParam + addrParam, null);
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  protected UserGroupInformation getUGI(HttpServletRequest request,
-                                        Configuration conf) {
-    String userID = (String) request
-        .getAttribute("org.apache.hadoop.hdfsproxy.authorized.userID");
-    return ProxyUtil.getProxyUGIFor(userID);
-  }
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileForward.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileForward.java
deleted file mode 100644
index 1f99ef4..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFileForward.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfsproxy;
-
-import javax.servlet.http.HttpServletRequest;
-
-import org.apache.hadoop.security.UserGroupInformation;
-
-
-public class ProxyFileForward extends ProxyForwardServlet {
-  /** For java.io.Serializable */
-  private static final long serialVersionUID = 1L;
-
-  /** {@inheritDoc} */
-  @Override
-  protected String buildForwardPath(HttpServletRequest request, String pathInfo) {
-    String path = "/streamFile";
-    path += request.getPathInfo();
-    String userID = (String) request.
-        getAttribute("org.apache.hadoop.hdfsproxy.authorized.userID");
-    UserGroupInformation ugi = ProxyUtil.getProxyUGIFor(userID);
-    if (ugi != null) {
-      path += "?ugi=" + ugi.getShortUserName();
-    }
-    return path;
-  }
-  
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java
deleted file mode 100644
index e746270..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyFilter.java
+++ /dev/null
@@ -1,368 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.math.BigInteger;
-import java.security.cert.CertificateFactory;
-import java.security.cert.X509Certificate;
-import java.security.cert.CertificateExpiredException;
-import java.security.cert.CertificateNotYetValidException;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.regex.Pattern;
-import java.net.InetSocketAddress;
-
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.ServletContext;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.net.NetUtils;
-
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-
-public class ProxyFilter implements Filter {
-  public static final Log LOG = LogFactory.getLog(ProxyFilter.class);
-
-  /** Pattern for triggering reload of user permissions */
-  protected static final Pattern RELOAD_PATTERN = Pattern
-      .compile("^(/reloadPermFiles)$");
-  /** Pattern for a filter to find out if a request is HFTP/HSFTP request */
-  protected static final Pattern HFTP_PATTERN = Pattern
-      .compile("^(/listPaths|/data|/streamFile|/file)$");
-  /**
-   * Pattern for a filter to find out if an HFTP/HSFTP request stores its file
-   * path in the extra path information associated with the URL; if not, the
-   * file path is stored in request parameter "filename"
-   */
-  protected static final Pattern FILEPATH_PATTERN = Pattern
-      .compile("^(/listPaths|/data|/file)$");
-
-  private static volatile Map<String, Set<Path>> permsMap;
-  private static volatile Map<String, Set<BigInteger>> certsMap;
-  static {
-    Configuration conf = new HdfsConfiguration(false);
-    conf.addResource("hdfsproxy-default.xml");
-    Map<String, Set<Path>> pMap = getPermMap(conf);
-    permsMap = pMap != null ? pMap : new HashMap<String, Set<Path>>();
-    Map<String, Set<BigInteger>> cMap = getCertsMap(conf);
-    certsMap = cMap != null ? cMap : new HashMap<String, Set<BigInteger>>();
-  }
-  
-
-  /** {@inheritDoc} */
-  public void init(FilterConfig filterConfig) throws ServletException {
-    ServletContext context = filterConfig.getServletContext();
-    Configuration conf = new HdfsConfiguration(false);
-    conf.addResource("hdfsproxy-default.xml");
-    conf.addResource("ssl-server.xml");
-    conf.addResource("hdfsproxy-site.xml");
-    String nn = conf.get("hdfsproxy.dfs.namenode.address");
-    if (nn == null) {
-      throw new ServletException("Proxy source cluster name node address not speficied");
-    }
-    InetSocketAddress nAddr = NetUtils.createSocketAddr(nn);
-    context.setAttribute("name.node.address", nAddr);
-    context.setAttribute("name.conf", new HdfsConfiguration());   
-    
-    context.setAttribute("org.apache.hadoop.hdfsproxy.conf", conf);
-    LOG.info("proxyFilter initialization success: " + nn);
-  }
-
-  private static Map<String, Set<Path>> getPermMap(Configuration conf) {
-    String permLoc = conf.get("hdfsproxy.user.permissions.file.location",
-        "user-permissions.xml");
-    if (conf.getResource(permLoc) == null) {
-      LOG.warn("HdfsProxy user permissions file not found");
-      return null;
-    }
-    Configuration permConf = new HdfsConfiguration(false);
-    permConf.addResource(permLoc);
-    Map<String, Set<Path>> map = new HashMap<String, Set<Path>>();
-    for (Map.Entry<String, String> e : permConf) {
-      String k = e.getKey();
-      String v = e.getValue();
-      if (k != null && k.length() != 0 && v != null && v.length() != 0) {
-        Set<Path> pathSet = new HashSet<Path>();
-        String[] paths = v.split(",\\s*");
-        for (String p : paths) {
-          if (p.length() != 0) {
-            pathSet.add(new Path(p));
-          }
-        }
-        map.put(k, pathSet);
-      }
-    }
-    return map;
-  }
-
-  private static Map<String, Set<BigInteger>> getCertsMap(Configuration conf) {
-    String certsLoc = conf.get("hdfsproxy.user.certs.file.location",
-        "user-certs.xml");
-    if (conf.getResource(certsLoc) == null) {
-      LOG.warn("HdfsProxy user certs file not found");
-      return null;
-    }
-    Configuration certsConf = new HdfsConfiguration(false);
-    certsConf.addResource(certsLoc);
-    Map<String, Set<BigInteger>> map = new HashMap<String, Set<BigInteger>>();
-    for (Map.Entry<String, String> e : certsConf) {
-      String k = e.getKey();
-      String v = e.getValue().trim();
-      if (k != null && k.length() != 0 && v != null && v.length() != 0) {
-        Set<BigInteger> numSet = new HashSet<BigInteger>();
-        String[] serialnumbers = v.split("\\s*,\\s*");
-        for (String num : serialnumbers) {
-          if (num.length() != 0) {
-            numSet.add(new BigInteger(num, 16));
-          }
-        }
-        map.put(k, numSet);
-      }
-    }
-    return map;
-  }
-
-  /** {@inheritDoc} */
-  public void destroy() {
-  }
-  
-  
-
-  /** {@inheritDoc} */
-  public void doFilter(ServletRequest request, ServletResponse response,
-      FilterChain chain) throws IOException, ServletException {
-
-    HttpServletRequest rqst = (HttpServletRequest) request;
-    HttpServletResponse rsp = (HttpServletResponse) response;
-    
-    if (LOG.isDebugEnabled()) {
-      StringBuilder b = new StringBuilder("Request from ").append(
-          rqst.getRemoteHost()).append("/").append(rqst.getRemoteAddr())
-          .append(":").append(rqst.getRemotePort());
-
-      @SuppressWarnings("unchecked")
-      Enumeration<String> e = rqst.getAttributeNames();
-      for (; e.hasMoreElements();) {
-        String attribute = e.nextElement();
-        b.append("\n  " + attribute + " => " + rqst.getAttribute(attribute));
-      }
-
-      X509Certificate[] userCerts = (X509Certificate[]) rqst
-          .getAttribute("javax.servlet.request.X509Certificate");
-      if (userCerts != null)
-        for (X509Certificate cert : userCerts)
-          b.append("\n Client certificate Subject Name is "
-              + cert.getSubjectX500Principal().getName());
-
-      b.append("\n The Scheme is " + rqst.getScheme());
-      b.append("\n The Auth Type is " + rqst.getAuthType());
-      b.append("\n The Path Info is " + rqst.getPathInfo());
-      b.append("\n The Translated Path Info is " + rqst.getPathTranslated());
-      b.append("\n The Context Path is " + rqst.getContextPath());
-      b.append("\n The Query String is " + rqst.getQueryString());
-      b.append("\n The Remote User is " + rqst.getRemoteUser());
-      b.append("\n The User Principal is " + rqst.getUserPrincipal());
-      b.append("\n The Request URI is " + rqst.getRequestURI());
-      b.append("\n The Request URL is " + rqst.getRequestURL());
-      b.append("\n The Servlet Path is " + rqst.getServletPath());
-
-      LOG.debug(b.toString());
-    }
-    
-    boolean unitTest = false;
-    if (rqst.getScheme().equalsIgnoreCase("http") && rqst.getParameter("UnitTest") != null) unitTest = true;
-    
-    if (rqst.getScheme().equalsIgnoreCase("https") || unitTest) {
-      boolean isAuthorized = false;
-      X509Certificate[] certs = (X509Certificate[]) rqst.getAttribute("javax.servlet.request.X509Certificate");
-      
-      if (unitTest) {
-        try {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("==> Entering https unit test");
-          }
-          String SslPath = rqst.getParameter("SslPath");
-          InputStream inStream = new FileInputStream(SslPath);
-          CertificateFactory cf = CertificateFactory.getInstance("X.509");
-          X509Certificate cert = (X509Certificate)cf.generateCertificate(inStream);
-          inStream.close();          
-          certs = new X509Certificate[] {cert};
-        } catch (Exception e) {
-          // do nothing here
-        }
-      } 
-      
-      if (certs == null || certs.length == 0) {
-        rsp.sendError(HttpServletResponse.SC_BAD_REQUEST,
-          "No client SSL certificate received");
-        LOG.info("No Client SSL certificate received");
-        return;       
-      }
-      for (X509Certificate cert : certs) {
-        try {
-          cert.checkValidity();
-        } catch (CertificateExpiredException e) {
-          LOG.info("Received cert for "
-              + cert.getSubjectX500Principal().getName() + " expired");
-          rsp
-              .sendError(HttpServletResponse.SC_FORBIDDEN,
-                  "Certificate expired");
-          return;
-        } catch (CertificateNotYetValidException e) {
-          LOG.info("Received cert for "
-              + cert.getSubjectX500Principal().getName() + " is not yet valid");
-          rsp.sendError(HttpServletResponse.SC_FORBIDDEN,
-              "Certificate is not yet valid");
-          return;
-        }
-      }
-      
-      String[] tokens = certs[0].getSubjectX500Principal().getName().split(
-          "\\s*,\\s*");
-      String userID = null;
-      for (String s : tokens) {
-        if (s.startsWith("CN=")) {
-          userID = s;
-          break;
-        }
-      }
-      if (userID == null || userID.length() < 4) {
-        LOG.info("Can't retrieve user ID from SSL certificate");
-        rsp.sendError(HttpServletResponse.SC_FORBIDDEN,
-            "Can't retrieve user ID from SSL certificate");
-        return;
-      }
-      userID = userID.substring(3);
-      
-      String servletPath = rqst.getServletPath();
-      if (unitTest) { 
-        servletPath = rqst.getParameter("TestSevletPathInfo");
-        LOG.info("this is for unit test purpose only");
-      }
-      
-      if (HFTP_PATTERN.matcher(servletPath).matches()) {
-        // request is an HSFTP request
-        if (FILEPATH_PATTERN.matcher(servletPath).matches()) {
-          // file path as part of the URL
-          isAuthorized = checkPath(userID, certs[0],
-              rqst.getPathInfo() != null ? rqst.getPathInfo() : "/");
-        } else {
-          // file path is stored in "filename" parameter
-          isAuthorized = checkPath(userID, certs[0], rqst
-              .getParameter("filename"));
-        }
-      } else if (RELOAD_PATTERN.matcher(servletPath).matches()
-          && checkUser("Admin", certs[0])) {
-        Configuration conf = new HdfsConfiguration(false);
-        conf.addResource("hdfsproxy-default.xml");
-        Map<String, Set<Path>> permsMap = getPermMap(conf);
-        Map<String, Set<BigInteger>> certsMap = getCertsMap(conf);
-        if (permsMap == null || certsMap == null) {
-          LOG.warn("Permission files reloading failed");
-          rsp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
-              "Permission files reloading failed");
-          return;
-        }
-        ProxyFilter.permsMap = permsMap;
-        ProxyFilter.certsMap = certsMap;
-        LOG.info("User permissions and user certs files reloaded");
-        rsp.setStatus(HttpServletResponse.SC_OK);
-        return;
-      } 
-
-      if (!isAuthorized) {
-        rsp.sendError(HttpServletResponse.SC_FORBIDDEN, "Unauthorized access");
-        return;
-      }
-      
-      // request is authorized, set ugi for servlets
-      UserGroupInformation ugi = UserGroupInformation.createRemoteUser(userID);
-      rqst.setAttribute("authorized.ugi", ugi);
-      rqst.setAttribute("org.apache.hadoop.hdfsproxy.authorized.userID", userID);
-    } else if(rqst.getScheme().equalsIgnoreCase("http")) { // http request, set ugi for servlets, only for testing purposes
-      String ugi = rqst.getParameter("ugi");
-      if (ugi != null) {
-        rqst.setAttribute("authorized.ugi", UserGroupInformation.createRemoteUser(ugi));
-        rqst.setAttribute("org.apache.hadoop.hdfsproxy.authorized.userID", ugi.split(",")[0]);
-      } 
-    }
-    chain.doFilter(request, response);
-  }
-
-  /** check that client's cert is listed in the user certs file */
-  private boolean checkUser(String userID, X509Certificate cert) {
-    Set<BigInteger> numSet = certsMap.get(userID);
-    if (numSet == null) {
-      LOG.info("User " + userID + " is not configured in the user certs file");
-      return false;
-    }
-    if (!numSet.contains(cert.getSerialNumber())) {
-      LOG.info("Cert with serial number " + cert.getSerialNumber()
-          + " is not listed for user " + userID);
-      return false;
-    }
-    return true;
-  }
-
-  /** check that the requested path is listed in the user permissions file */
-  private boolean checkPath(String userID, X509Certificate cert, String pathInfo) {
-    if (!checkUser(userID, cert)) {
-      return false;
-    }
-
-    Set<Path> pathSet = permsMap.get(userID);
-    if (pathSet == null) {
-      LOG.info("User " + userID
-              + " is not listed in the user permissions file");
-      return false;
-    }
-    if (pathInfo == null || pathInfo.length() == 0) {
-      LOG.info("Can't get file path from HTTPS request; user is " + userID);
-      return false;
-    }
-    
-    Path userPath = new Path(pathInfo);
-    while (userPath != null) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("\n Checking file path " + userPath);
-      }
-      if (pathSet.contains(userPath))
-        return true;
-      userPath = userPath.getParent();
-    }
-    LOG.info("User " + userID + " is not authorized to access " + pathInfo);
-    return false;
-  }
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyForwardServlet.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyForwardServlet.java
deleted file mode 100644
index 0313766..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyForwardServlet.java
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.IOException;
-
-import javax.servlet.RequestDispatcher;
-import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Servlet that forwards an incoming request to the web application context
- * matching the version derived from the requested hostname.
- */
-public class ProxyForwardServlet extends HttpServlet {
-  /**
-   * For java.io.Serializable
-   */
-  private static final long serialVersionUID = 1L;
-  private static Configuration configuration = null;
-  public static final Log LOG = LogFactory.getLog(ProxyForwardServlet.class);
-
-  /** {@inheritDoc} */
-  @Override
-  public void init() throws ServletException {
-    ServletContext context = getServletContext();
-    configuration = (Configuration) context
-        .getAttribute("org.apache.hadoop.hdfsproxy.conf");
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  public void doGet(HttpServletRequest request, HttpServletResponse response)
-      throws IOException, ServletException {
-    String hostname = request.getServerName();
-
-    String version = configuration.get(hostname);
-    if (version == null) {
-      // extract from hostname directly
-      String[] strs = hostname.split("[-\\.]");
-      version = "/" + strs[0];
-    }
-
-    ServletContext curContext = getServletContext();
-    ServletContext dstContext = curContext.getContext(version);
-
-    // avoid infinite forwarding.
-    if (dstContext == null
-        || getServletContext().equals(dstContext)) {
-      LOG.error("Context (" + version
-          + ".war) non-exist or restricted from access");
-      response.sendError(HttpServletResponse.SC_NOT_FOUND);
-      return;
-    }
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("Request to " + hostname +
-          " is forwarded to version " + version);
-    }
-    forwardRequest(request, response, dstContext, request.getServletPath());
-
-  }
-
-  /** {@inheritDoc} */
-  public void forwardRequest(HttpServletRequest request,
-      HttpServletResponse response, ServletContext context, String pathInfo)
-      throws IOException, ServletException {
-    String path = buildForwardPath(request, pathInfo);
-    RequestDispatcher dispatcher = context.getRequestDispatcher(path);
-    if (dispatcher == null) {
-      LOG.info("There was no such dispatcher: " + path);
-      response.sendError(HttpServletResponse.SC_NO_CONTENT);
-      return;
-    }
-    dispatcher.forward(request, response);
-  }
-
-  /** {@inheritDoc} */
-  protected String buildForwardPath(HttpServletRequest request, String pathInfo) {
-    String path = pathInfo;
-    if (request.getPathInfo() != null) {
-      path += request.getPathInfo();
-    }
-    if (request.getQueryString() != null) {
-      path += "?" + request.getQueryString();
-    }
-    return path;
-  }
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyHttpServer.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyHttpServer.java
deleted file mode 100644
index 952464e..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyHttpServer.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.Map;
-
-import javax.servlet.http.HttpServlet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer;
-import org.apache.hadoop.net.NetUtils;
-
-import org.mortbay.jetty.Connector;
-import org.mortbay.jetty.nio.SelectChannelConnector;
-import org.mortbay.jetty.security.SslSocketConnector;
-
-/**
- * Create a Jetty embedded server to answer http/https requests.
- */
-public class ProxyHttpServer extends HttpServer {
-  public static final Log LOG = LogFactory.getLog(ProxyHttpServer.class);
-
-  public ProxyHttpServer(InetSocketAddress addr, Configuration conf)
-      throws IOException {
-    super("", addr.getHostName(), addr.getPort(), 0 <= addr.getPort(), conf);
-  }
-
-  /** {@inheritDoc} */
-  public Connector createBaseListener(Configuration conf)
-      throws IOException {
-    final String sAddr;
-    if (null == (sAddr = conf.get("proxy.http.test.listener.addr"))) {
-      SslSocketConnector sslListener = new SslSocketConnector();
-      sslListener.setKeystore(conf.get("ssl.server.keystore.location"));
-      sslListener.setPassword(conf.get("ssl.server.keystore.password", ""));
-      sslListener.setKeyPassword(conf.get("ssl.server.keystore.keypassword", ""));
-      sslListener.setKeystoreType(conf.get("ssl.server.keystore.type", "jks"));
-      sslListener.setNeedClientAuth(true);
-      System.setProperty("javax.net.ssl.trustStore",
-          conf.get("ssl.server.truststore.location", ""));
-      System.setProperty("javax.net.ssl.trustStorePassword",
-          conf.get("ssl.server.truststore.password", ""));
-      System.setProperty("javax.net.ssl.trustStoreType",
-          conf.get("ssl.server.truststore.type", "jks"));
-      return sslListener;
-    }
-    // unit test
-    InetSocketAddress proxyAddr = NetUtils.createSocketAddr(sAddr);
-    SelectChannelConnector testlistener = new SelectChannelConnector();
-    testlistener.setUseDirectBuffers(false);
-    testlistener.setHost(proxyAddr.getHostName());
-    testlistener.setPort(proxyAddr.getPort());
-    return testlistener;
-  }
-
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java
deleted file mode 100644
index 9ea679a..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyListPathsServlet.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfsproxy;
-
-import javax.servlet.http.HttpServletRequest;
-
-import org.apache.hadoop.hdfs.server.namenode.ListPathsServlet;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.conf.Configuration;
-
-/** {@inheritDoc} */
-public class ProxyListPathsServlet extends ListPathsServlet {
-  /** For java.io.Serializable */
-  private static final long serialVersionUID = 1L;
-
-  /** {@inheritDoc} */
-  @Override
-  protected UserGroupInformation getUGI(HttpServletRequest request,
-                                        Configuration conf) {
-    String userID = (String) request
-        .getAttribute("org.apache.hadoop.hdfsproxy.authorized.userID");
-    return ProxyUtil.getProxyUGIFor(userID);
-  }
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java
deleted file mode 100644
index 5335fa7..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyStreamFile.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.security.PrivilegedExceptionAction;
-
-import javax.servlet.ServletContext;
-import javax.servlet.http.HttpServletRequest;
-
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.server.namenode.StreamFile;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.conf.Configuration;
-
-/** {@inheritDoc} */
-public class ProxyStreamFile extends StreamFile {
-  /** For java.io.Serializable */
-  private static final long serialVersionUID = 1L;
-
-  /** {@inheritDoc} */
-  @Override
-  protected DFSClient getDFSClient(HttpServletRequest request)
-      throws IOException, InterruptedException {
-    ServletContext context = getServletContext();
-    final Configuration conf =
-        (Configuration) context.getAttribute("name.conf");
-    final InetSocketAddress nameNodeAddr =
-        (InetSocketAddress) context.getAttribute("name.node.address");
-
-    DFSClient client = getUGI(request, conf).doAs
-        ( new PrivilegedExceptionAction<DFSClient>() {
-          @Override
-          public DFSClient run() throws IOException {
-            return new DFSClient(nameNodeAddr, conf);
-          }
-        });
-
-    return client;
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  protected UserGroupInformation getUGI(HttpServletRequest request,
-                                        Configuration conf) {
-    String userID = (String) request
-        .getAttribute("org.apache.hadoop.hdfsproxy.authorized.userID");
-    return ProxyUtil.getProxyUGIFor(userID);
-  }
-
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java b/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java
deleted file mode 100644
index 2f4b7c5..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/java/org/apache/hadoop/hdfsproxy/ProxyUtil.java
+++ /dev/null
@@ -1,358 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URL;
-import java.security.KeyStore;
-import java.security.cert.X509Certificate;
-import java.util.Date;
-import java.util.Set;
-
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.HttpsURLConnection;
-import javax.net.ssl.KeyManager;
-import javax.net.ssl.KeyManagerFactory;
-import javax.net.ssl.SSLContext;
-import javax.net.ssl.SSLSession;
-import javax.net.ssl.TrustManager;
-import javax.net.ssl.TrustManagerFactory;
-import javax.net.ssl.X509TrustManager;
-import javax.servlet.http.HttpServletResponse;
-import javax.servlet.ServletException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.util.HostsFileReader;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-
-/**
- * Proxy utility.
- */
-public class ProxyUtil {
-  public static final Log LOG = LogFactory.getLog(ProxyUtil.class);
-  private static final long MM_SECONDS_PER_DAY = 1000 * 60 * 60 * 24;
-  // Warn when a server certificate is within this many days of expiring.
-  private static final int CERT_EXPIRATION_WARNING_THRESHOLD = 30;
-
-  private static enum UtilityOption {
-    RELOAD("-reloadPermFiles"), GET("-get"), CHECKCERTS(
-        "-checkcerts");
-
-    private String name = null;
-
-    private UtilityOption(String arg) {
-      this.name = arg;
-    }
-
-    public String getName() {
-      return name;
-    }
-  }
-
-  /**
-   * Dummy hostname verifier that is used to bypass hostname checking
-   */
-  private static class DummyHostnameVerifier implements HostnameVerifier {
-    public boolean verify(String hostname, SSLSession session) {
-      return true;
-    }
-  }
-
-  /**
-   * Dummy trustmanager that is used to bypass server certificate checking
-   */
-  private static class DummyTrustManager implements X509TrustManager {
-    public void checkClientTrusted(X509Certificate[] chain, String authType) {
-    }
-
-    public void checkServerTrusted(X509Certificate[] chain, String authType) {
-    }
-
-    public X509Certificate[] getAcceptedIssuers() {
-      return null;
-    }
-  }
-
-  private static HttpsURLConnection openConnection(String hostname, int port,
-      String path) throws IOException {
-    try {
-      final URL url = new URI("https", null, hostname, port, path, null, null)
-          .toURL();
-      HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
-      // bypass hostname verification
-      conn.setHostnameVerifier(new DummyHostnameVerifier());
-      conn.setRequestMethod("GET");
-      return conn;
-    } catch (URISyntaxException e) {
-      throw (IOException) new IOException().initCause(e);
-    }
-  }
-
-  private static void setupSslProps(Configuration conf) throws IOException {
-    FileInputStream fis = null;
-    try {
-      SSLContext sc = SSLContext.getInstance("SSL");
-      KeyManager[] kms = null;
-      TrustManager[] tms = null;
-      if (conf.get("ssl.client.keystore.location") != null) {
-        // initialize default key manager with keystore file and pass
-        KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509");
-        KeyStore ks = KeyStore.getInstance(conf.get("ssl.client.keystore.type",
-            "JKS"));
-        char[] ksPass = conf.get("ssl.client.keystore.password", "changeit")
-            .toCharArray();
-        fis = new FileInputStream(conf.get("ssl.client.keystore.location",
-            "keystore.jks"));
-        ks.load(fis, ksPass);
-        kmf.init(ks, conf.get("ssl.client.keystore.keypassword", "changeit")
-            .toCharArray());
-        kms = kmf.getKeyManagers();
-        fis.close();
-        fis = null;
-      }
-      // initialize default trust manager with keystore file and pass
-      if (conf.getBoolean("ssl.client.do.not.authenticate.server", false)) {
-        // by pass trustmanager validation
-        tms = new DummyTrustManager[] { new DummyTrustManager() };
-      } else {
-        TrustManagerFactory tmf = TrustManagerFactory.getInstance("PKIX");
-        KeyStore ts = KeyStore.getInstance(conf.get(
-            "ssl.client.truststore.type", "JKS"));
-        char[] tsPass = conf.get("ssl.client.truststore.password", "changeit")
-            .toCharArray();
-        fis = new FileInputStream(conf.get("ssl.client.truststore.location",
-            "truststore.jks"));
-        ts.load(fis, tsPass);
-        tmf.init(ts);
-        tms = tmf.getTrustManagers();
-      }
-      sc.init(kms, tms, new java.security.SecureRandom());
-      HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory());
-    } catch (Exception e) {
-      throw new IOException("Could not initialize SSLContext", e);
-    } finally {
-      if (fis != null) {
-        fis.close();
-      }
-    }
-  }
-
-  static InetSocketAddress getSslAddr(Configuration conf) throws IOException {
-    String addr = conf.get("hdfsproxy.https.address");
-    if (addr == null)
-      throw new IOException("HdfsProxy address is not specified");
-    return NetUtils.createSocketAddr(addr);
-  }
-
-  static boolean sendCommand(Configuration conf, String path)
-      throws IOException {
-    setupSslProps(conf);
-    int sslPort = getSslAddr(conf).getPort();
-    int err = 0;
-    StringBuilder b = new StringBuilder();
-
-    HostsFileReader hostsReader = new HostsFileReader(conf.get(
-        "hdfsproxy.hosts", "hdfsproxy-hosts"), "");
-    Set<String> hostsList = hostsReader.getHosts();
-    for (String hostname : hostsList) {
-      HttpsURLConnection connection = null;
-      try {
-        connection = openConnection(hostname, sslPort, path);
-        connection.connect();
-        if (LOG.isDebugEnabled()) {
-          StringBuilder sb = new StringBuilder();
-          X509Certificate[] clientCerts = (X509Certificate[]) connection
-              .getLocalCertificates();
-          if (clientCerts != null) {
-            for (X509Certificate cert : clientCerts)
-              sb.append("\n Client certificate Subject Name is "
-                  + cert.getSubjectX500Principal().getName());
-          } else {
-            sb.append("\n No client certificates were found");
-          }
-          X509Certificate[] serverCerts = (X509Certificate[]) connection
-              .getServerCertificates();
-          if (serverCerts != null) {
-            for (X509Certificate cert : serverCerts)
-              sb.append("\n Server certificate Subject Name is "
-                  + cert.getSubjectX500Principal().getName());
-          } else {
-            sb.append("\n No server certificates were found");
-          }
-          LOG.debug(sb.toString());
-        }
-        if (connection.getResponseCode() != HttpServletResponse.SC_OK) {
-          b.append("\n\t" + hostname + ": " + connection.getResponseCode()
-              + " " + connection.getResponseMessage());
-          err++;
-        }
-      } catch (IOException e) {
-        b.append("\n\t" + hostname + ": " + e.getLocalizedMessage());
-        if (LOG.isDebugEnabled())
-          LOG.debug("Exception happend for host " + hostname, e);
-        err++;
-      } finally {
-        if (connection != null)
-          connection.disconnect();
-      }
-    }
-    if (err > 0) {
-      System.err.print("Command failed on the following " + err + " host"
-          + (err == 1 ? ":" : "s:") + b.toString() + "\n");
-      return false;
-    }
-    return true;
-  }
-
-  static FSDataInputStream open(Configuration conf, String hostname, int port,
-      String path) throws IOException {
-    setupSslProps(conf);
-    HttpURLConnection connection = null;
-    connection = openConnection(hostname, port, path);
-    connection.connect();
-    final InputStream in = connection.getInputStream();
-    return new FSDataInputStream(new FSInputStream() {
-      public int read() throws IOException {
-        return in.read();
-      }
-
-      public int read(byte[] b, int off, int len) throws IOException {
-        return in.read(b, off, len);
-      }
-
-      public void close() throws IOException {
-        in.close();
-      }
-
-      public void seek(long pos) throws IOException {
-        throw new IOException("Can't seek!");
-      }
-
-      public long getPos() throws IOException {
-        throw new IOException("Position unknown!");
-      }
-
-      public boolean seekToNewSource(long targetPos) throws IOException {
-        return false;
-      }
-    });
-  }
-
-  static void checkServerCertsExpirationDays(Configuration conf,
-      String hostname, int port) throws IOException {
-    setupSslProps(conf);
-    HttpsURLConnection connection = null;
-    connection = openConnection(hostname, port, null);
-    connection.connect();
-    X509Certificate[] serverCerts = (X509Certificate[]) connection
-        .getServerCertificates();
-    Date curDate = new Date();
-    long curTime = curDate.getTime();
-    if (serverCerts != null) {
-      for (X509Certificate cert : serverCerts) {
-        StringBuilder sb = new StringBuilder();
-        sb.append("\n Server certificate Subject Name: "
-            + cert.getSubjectX500Principal().getName());
-        Date expDate = cert.getNotAfter();
-        long expTime = expDate.getTime();
-        int dayOffSet = (int) ((expTime - curTime) / MM_SECONDS_PER_DAY);
-        sb.append(" have " + dayOffSet + " days to expire");
-        if (dayOffSet < CERT_EXPIRATION_WARNING_THRESHOLD)
-          LOG.warn(sb.toString());
-        else
-          LOG.info(sb.toString());
-      }
-    } else {
-      LOG.info("\n No Server certs was found");
-    }
-
-    if (connection != null) {
-      connection.disconnect();
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    if (args.length < 1
-        || (!UtilityOption.RELOAD.getName().equalsIgnoreCase(args[0])
-            && !UtilityOption.GET.getName().equalsIgnoreCase(args[0]) && !UtilityOption.CHECKCERTS
-            .getName().equalsIgnoreCase(args[0]))
-        || (UtilityOption.GET.getName().equalsIgnoreCase(args[0]) && args.length != 4)
-        || (UtilityOption.CHECKCERTS.getName().equalsIgnoreCase(args[0]) && args.length != 3)) {
-      System.err.println("Usage: ProxyUtil [" + UtilityOption.RELOAD.getName()
-          + "] | ["
-          + UtilityOption.GET.getName() + " <hostname> <#port> <path> ] | ["
-          + UtilityOption.CHECKCERTS.getName() + " <hostname> <#port> ]");
-      System.exit(0);
-    }
-    Configuration conf = new HdfsConfiguration(false);
-    conf.addResource("ssl-client.xml");
-    conf.addResource("hdfsproxy-default.xml");
-
-    if (UtilityOption.RELOAD.getName().equalsIgnoreCase(args[0])) {
-      // reload user-certs.xml and user-permissions.xml files
-      sendCommand(conf, "/reloadPermFiles");
-    } else if (UtilityOption.CHECKCERTS.getName().equalsIgnoreCase(args[0])) {
-      checkServerCertsExpirationDays(conf, args[1], Integer.parseInt(args[2]));
-    } else {
-      String hostname = args[1];
-      int port = Integer.parseInt(args[2]);
-      String path = args[3];
-      InputStream in = open(conf, hostname, port, path);
-      IOUtils.copyBytes(in, System.out, conf, false);
-      in.close();
-    }
-  }
-
-  public static String getNamenode(Configuration conf)
-      throws ServletException {
-    String namenode = conf.get("fs.default.name");
-    if (namenode == null) {
-      throw new
-          ServletException("Proxy source cluster name node address missing");
-    }
-    return namenode;
-  }
-
-  public static UserGroupInformation getProxyUGIFor(String userID) {
-    try {
-      return UserGroupInformation.
-          createProxyUser(userID, UserGroupInformation.getLoginUser());
-    } catch (IOException e) {
-      throw new
-          RuntimeException("Unable get current logged in user", e);
-    }
-  }
-
-
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/DummyLdapContext.java b/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/DummyLdapContext.java
deleted file mode 100644
index 8e5848a..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/DummyLdapContext.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import java.util.ArrayList;
-import java.util.Hashtable;
-
-import javax.naming.NamingEnumeration;
-import javax.naming.NamingException;
-import javax.naming.directory.Attribute;
-import javax.naming.directory.Attributes;
-import javax.naming.directory.BasicAttribute;
-import javax.naming.directory.BasicAttributes;
-import javax.naming.directory.SearchResult;
-import javax.naming.ldap.Control;
-import javax.naming.ldap.InitialLdapContext;
-
-class DummyLdapContext extends InitialLdapContext {
-  class ResultEnum<T> implements NamingEnumeration<T> {
-    private ArrayList<T> rl;
-
-    public ResultEnum() {
-      rl = new ArrayList<T>();
-    }
-
-    public ResultEnum(ArrayList<T> al) {
-      rl = al;
-    }
-
-    public boolean hasMoreElements() {
-      return !rl.isEmpty();
-    }
-
-    public T nextElement() {
-      T t = rl.get(0);
-      rl.remove(0);
-      return t;
-    }
-
-    public boolean hasMore() throws NamingException {
-      return !rl.isEmpty();
-    }
-
-    public T next() throws NamingException {
-      T t = rl.get(0);
-      rl.remove(0);
-      return t;
-    }
-
-    public void close() throws NamingException {
-    }
-  }
-
-  public DummyLdapContext() throws NamingException {
-  }
-
-  public DummyLdapContext(Hashtable<?, ?> environment, Control[] connCtls)
-      throws NamingException {
-  }
-
-  public NamingEnumeration<SearchResult> search(String name,
-      Attributes matchingAttributes, String[] attributesToReturn)
-      throws NamingException {
-    System.out.println("Searching Dummy LDAP Server Results:");
-    if (!"ou=proxyroles,dc=mycompany,dc=com".equalsIgnoreCase(name)) {
-      System.out.println("baseName mismatch");
-      return new ResultEnum<SearchResult>();
-    }
-    if (!"cn=127.0.0.1".equals((String) matchingAttributes.get("uniqueMember")
-        .get())) {
-      System.out.println("Ip address mismatch");
-      return new ResultEnum<SearchResult>();
-    }
-    BasicAttributes attrs = new BasicAttributes();
-    BasicAttribute uidAttr = new BasicAttribute("uid", "testuser");
-    attrs.put(uidAttr);
-    BasicAttribute groupAttr = new BasicAttribute("userClass", "testgroup");
-    attrs.put(groupAttr);
-    BasicAttribute locAttr = new BasicAttribute("documentLocation", "/testdir");
-    attrs.put(locAttr);
-    SearchResult sr = new SearchResult(null, null, attrs);
-    ArrayList<SearchResult> al = new ArrayList<SearchResult>();
-    al.add(sr);
-    NamingEnumeration<SearchResult> ne = new ResultEnum<SearchResult>(al);
-    return ne;
-  }
-
-  @SuppressWarnings("unchecked")
-  public static void main(String[] args) throws Exception {
-    DummyLdapContext dlc = new DummyLdapContext();
-    String baseName = "ou=proxyroles,dc=mycompany,dc=com";
-    Attributes matchAttrs = new BasicAttributes(true);
-    String[] attrIDs = { "uid", "documentLocation" };
-    NamingEnumeration<SearchResult> results = dlc.search(baseName, matchAttrs,
-        attrIDs);
-    if (results.hasMore()) {
-      SearchResult sr = results.next();
-      Attributes attrs = sr.getAttributes();
-      for (NamingEnumeration ne = attrs.getAll(); ne.hasMore();) {
-        Attribute attr = (Attribute) ne.next();
-        if ("uid".equalsIgnoreCase(attr.getID())) {
-          System.out.println("User ID = " + attr.get());
-        } else if ("documentLocation".equalsIgnoreCase(attr.getID())) {
-          System.out.println("Document Location = ");
-          for (NamingEnumeration e = attr.getAll(); e.hasMore();) {
-            System.out.println(e.next());
-          }
-        }
-      }
-    }
-  }
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/FindFreePort.java b/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/FindFreePort.java
deleted file mode 100644
index b4dde89..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/FindFreePort.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import java.net.ServerSocket;
-import java.io.IOException;
-import java.net.BindException;
-import java.util.Random;
-
-public class FindFreePort {
-  private static final int MIN_AVAILABLE_PORT = 10000;
-  private static final int MAX_AVAILABLE_PORT = 65535;
-  private static Random random = new Random();
-  /**
-   * @param port if negative, find any free port; otherwise find a free port
-   *             greater than the given port
-   * @return the free port that was found
-   * @throws IOException if no free port could be found
-   */
-  public static int findFreePort(int port) throws IOException {
-    ServerSocket server;
-    if (port < 0) {
-      server =  new ServerSocket(0);      
-    } else {
-      int freePort = port+1;
-      while (true) {
-        try {
-          server =  new ServerSocket(freePort);
-          break;
-        } catch (IOException e) {
-          if (e instanceof BindException) {
-            if (freePort >= MAX_AVAILABLE_PORT || 
-                freePort < MIN_AVAILABLE_PORT) {
-              throw e;
-            }
-          } else {
-            throw e;
-          }
-          freePort += 1;
-        }
-      }
-    }
-    int fport = server.getLocalPort();
-    server.close();
-    return fport;    
-  }
-  /**
-   * @return a free port, searching upward from a random starting point in
-   *         [MIN_AVAILABLE_PORT, MAX_AVAILABLE_PORT]
-   * @throws IOException if no free port can be found
-   */
-  public static int findFreePortRandom() throws IOException {
-    return findFreePort(MIN_AVAILABLE_PORT + random.nextInt(MAX_AVAILABLE_PORT - MIN_AVAILABLE_PORT + 1));
-  }
-   
-
-  public static void main(String[] args) throws Exception {
-    if(args.length < 1) {       
-      System.err.println("Usage: FindFreePort < -random / <#port> >");        
-      System.exit(0);      
-    }
-    int j = 0;
-    String cmd = args[j++];
-    if ("-random".equals(cmd)) {
-      System.out.println(findFreePortRandom());
-    } else {
-      System.out.println(findFreePort(Integer.parseInt(cmd)));
-    }   
-  }
-        
-}
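
For reference, the helper above is essentially a port-scanning wrapper around the standard ServerSocket(0) idiom, which is also what it falls back to when given a negative argument. A minimal sketch of that idiom (class and method names here are illustrative only, not part of the removed tree):

    import java.io.IOException;
    import java.net.ServerSocket;

    public class FreePortSketch {
      // Bind to port 0 and let the OS hand back a currently free ephemeral port.
      public static int pickFreePort() throws IOException {
        try (ServerSocket socket = new ServerSocket(0)) {
          return socket.getLocalPort();
        }
      }

      public static void main(String[] args) throws IOException {
        // The port is only guaranteed free at bind time; reuse it promptly.
        System.out.println(pickFreePort());
      }
    }

The returned port can be grabbed by another process between close() and reuse, which is why the test configs elsewhere in this patch bind to "localhost:0" directly where they can.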
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/SimpleServlet.java b/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/SimpleServlet.java
deleted file mode 100644
index e0b8a03..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/SimpleServlet.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-import java.io.PrintWriter;
-import java.io.IOException;
-
-
-/**
- * Simple servlet used as a forward target for testing purposes.
- */
-
-public class SimpleServlet extends HttpServlet {
- 
-  /**
-   * 
-   */
-  private static final long serialVersionUID = 1L;
-
-  public void doGet(HttpServletRequest request, HttpServletResponse response)
-    throws IOException {
-    response.setContentType("text/html");
-    PrintWriter out = response.getWriter();
-    out.print("<html><head/><body>");
-    out.print("A GET request");
-    out.print("</body></html>");
-    out.close();
-    return;
-  }
-
-}
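
The servlet above is mapped to /simple/* by the cactus-web.xml removed further below, so the forward tests expect the literal body "<html><head/><body>A GET request</body></html>". A hedged usage sketch of hitting that mapping directly (host, port, and the /test context path are placeholders for wherever the test webapp happens to be deployed):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class SimpleGetSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder address; the real tests resolve this from the Cactus/Tomcat setup.
        URL url = new URL("http://localhost:8080/test/simple/");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line); // expected: <html><head/><body>A GET request</body></html>
          }
        } finally {
          conn.disconnect();
        }
      }
    }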
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestAuthorizationFilter.java b/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestAuthorizationFilter.java
deleted file mode 100644
index 377ee1b..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestAuthorizationFilter.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.List;
-import java.util.ArrayList;
-
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-
-import org.apache.cactus.FilterTestCase;
-import org.apache.cactus.WebRequest;
-import org.apache.cactus.WebResponse;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-
-public class TestAuthorizationFilter extends FilterTestCase {
-
-  public static final Log LOG = LogFactory.getLog(TestAuthorizationFilter.class);
-
-  private class DummyFilterChain implements FilterChain {
-    public void doFilter(ServletRequest theRequest, ServletResponse theResponse)
-        throws IOException, ServletException {
-      PrintWriter writer = theResponse.getWriter();
-
-      writer.print("<p>some content</p>");
-      writer.close();
-    }
-
-    public void init(FilterConfig theConfig) {
-    }
-
-    public void destroy() {
-    }
-  }
-
-  private class ConfiguredAuthorizationFilter extends AuthorizationFilter {
-    
-    private ConfiguredAuthorizationFilter(String nameNode) {
-      this.namenode = nameNode;
-    }  
-  }
-
-  public void beginPathRestriction(WebRequest theRequest) {
-    theRequest.setURL("proxy-test:0", null, "/streamFile/nontestdir",
-      null,null);
-  }
-
-  public void testPathRestriction() throws ServletException, IOException {
-    AuthorizationFilter filter = new 
-        ConfiguredAuthorizationFilter("hdfs://apache.org");
-    request.setRemoteIPAddress("127.0.0.1");
-    request.setAttribute("org.apache.hadoop.hdfsproxy.authorized.userID",
-        System.getProperty("user.name"));
-    List<Path> paths = new ArrayList<Path>();
-    paths.add(new Path("/deny"));
-    paths.add(new Path("hdfs://test:100/deny"));
-    paths.add(new Path("hdfs://test/deny"));
-    request.setAttribute("org.apache.hadoop.hdfsproxy.authorized.paths",
-        paths);
-    FilterChain mockFilterChain = new DummyFilterChain();
-    filter.doFilter(request, response, mockFilterChain);
-  }
-
-  public void endPathRestriction(WebResponse theResponse) {
-    assertEquals(theResponse.getStatusCode(), 403);
-    assertTrue("Text missing 'User not authorized to access path' : : ["
-        + theResponse.getText() + "]", theResponse.getText().indexOf(
-        "is not authorized to access path") > 0);
-  }
-
-  public void beginPathPermit(WebRequest theRequest) {
-    theRequest.setURL("proxy-test:0", null, "/streamFile/data/file",
-      null, null);
-  }
-
-  public void testPathPermit() throws ServletException, IOException {
-    AuthorizationFilter filter = new 
-        ConfiguredAuthorizationFilter("hdfs://apache.org");
-    request.setRemoteIPAddress("127.0.0.1");
-    request.setAttribute("org.apache.hadoop.hdfsproxy.authorized.userID",
-        System.getProperty("user.name"));
-    List<Path> paths = new ArrayList<Path>();
-    paths.add(new Path("/data"));
-    request.setAttribute("org.apache.hadoop.hdfsproxy.authorized.paths",
-        paths);
-    FilterChain mockFilterChain = new DummyFilterChain();
-    filter.doFilter(request, response, mockFilterChain);
-  }
-
-  public void endPathPermit(WebResponse theResponse) {
-    assertEquals(theResponse.getStatusCode(), 200);
-  }
-
-  public void beginPathPermitQualified(WebRequest theRequest) {
-    theRequest.setURL("proxy-test:0", null, "/streamFile/data/file",
-      null, null);
-  }
-
-  public void testPathPermitQualified() throws ServletException, IOException {
-    AuthorizationFilter filter = new 
-        ConfiguredAuthorizationFilter("hdfs://apache.org");
-    request.setRemoteIPAddress("127.0.0.1");
-    request.setAttribute("org.apache.hadoop.hdfsproxy.authorized.userID",
-        System.getProperty("user.name"));
-    List<Path> paths = new ArrayList<Path>();
-    paths.add(new Path("hdfs://apache.org/data"));
-    request.setAttribute("org.apache.hadoop.hdfsproxy.authorized.paths",
-        paths);
-    FilterChain mockFilterChain = new DummyFilterChain();
-    filter.doFilter(request, response, mockFilterChain);
-  }
-
-  public void endPathPermitQualified(WebResponse theResponse) {
-    assertEquals(theResponse.getStatusCode(), 200);
-  }
-  
-  public void beginPathQualifiedReject(WebRequest theRequest) {
-    theRequest.setURL("proxy-test:0", null, "/streamFile/data/file",
-      null, null);
-  }
-
-  public void testPathQualifiedReject() throws ServletException, IOException {
-    AuthorizationFilter filter = new 
-        ConfiguredAuthorizationFilter("hdfs://apache.org:1111");
-    request.setRemoteIPAddress("127.0.0.1");
-    request.setAttribute("org.apache.hadoop.hdfsproxy.authorized.userID",
-        System.getProperty("user.name"));
-    List<Path> paths = new ArrayList<Path>();
-    paths.add(new Path("hdfs://apache.org:2222/data"));
-    request.setAttribute("org.apache.hadoop.hdfsproxy.authorized.paths",
-        paths);
-    FilterChain mockFilterChain = new DummyFilterChain();
-    filter.doFilter(request, response, mockFilterChain);
-  }
-
-  public void endPathQualifiedReject(WebResponse theResponse) {
-    assertEquals(theResponse.getStatusCode(), 403);
-  }
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java b/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java
deleted file mode 100644
index dd5bd52..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java
+++ /dev/null
@@ -1,272 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.util.Random;
-
-import junit.framework.TestCase;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.log4j.Level;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-
-/**
- * A JUnit test for HdfsProxy
- */
-public class TestHdfsProxy extends TestCase {
-  {
-    ((Log4JLogger) LogFactory.getLog("org.apache.hadoop.hdfs.StateChange"))
-        .getLogger().setLevel(Level.OFF);
-    ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.OFF);
-  }
-
-  static final URI LOCAL_FS = URI.create("file:///");
-
-  private static final int NFILES = 10;
-  private static String TEST_ROOT_DIR = new Path(System.getProperty(
-      "test.build.data", "/tmp")).toString().replace(' ', '+');
-
-  /**
-   * class MyFile contains enough information to recreate the contents of a
-   * single file.
-   */
-  private static class MyFile {
-    private static Random gen = new Random();
-    private static final int MAX_LEVELS = 3;
-    private static final int MAX_SIZE = 8 * 1024;
-    private static String[] dirNames = { "zero", "one", "two", "three", "four",
-        "five", "six", "seven", "eight", "nine" };
-    private final String name;
-    private int size = 0;
-    private long seed = 0L;
-
-    MyFile() {
-      this(gen.nextInt(MAX_LEVELS));
-    }
-
-    MyFile(int nLevels) {
-      String xname = "";
-      if (nLevels != 0) {
-        int[] levels = new int[nLevels];
-        for (int idx = 0; idx < nLevels; idx++) {
-          levels[idx] = gen.nextInt(10);
-        }
-        StringBuffer sb = new StringBuffer();
-        for (int idx = 0; idx < nLevels; idx++) {
-          sb.append(dirNames[levels[idx]]);
-          sb.append("/");
-        }
-        xname = sb.toString();
-      }
-      long fidx = gen.nextLong() & Long.MAX_VALUE;
-      name = xname + Long.toString(fidx);
-      reset();
-    }
-
-    void reset() {
-      final int oldsize = size;
-      do {
-        size = gen.nextInt(MAX_SIZE);
-      } while (oldsize == size);
-      final long oldseed = seed;
-      do {
-        seed = gen.nextLong() & Long.MAX_VALUE;
-      } while (oldseed == seed);
-    }
-
-    String getName() {
-      return name;
-    }
-
-    int getSize() {
-      return size;
-    }
-
-    long getSeed() {
-      return seed;
-    }
-  }
-
-  private static MyFile[] createFiles(URI fsname, String topdir)
-      throws IOException {
-    return createFiles(FileSystem.get(fsname, new HdfsConfiguration()), topdir);
-  }
-
-  /**
-   * Create NFILES files with random names and directory hierarchies, filled
-   * with random (but reproducible) data.
-   */
-  private static MyFile[] createFiles(FileSystem fs, String topdir)
-      throws IOException {
-    Path root = new Path(topdir);
-    MyFile[] files = new MyFile[NFILES];
-    for (int i = 0; i < NFILES; i++) {
-      files[i] = createFile(root, fs);
-    }
-    return files;
-  }
-
-  private static MyFile createFile(Path root, FileSystem fs, int levels)
-      throws IOException {
-    MyFile f = levels < 0 ? new MyFile() : new MyFile(levels);
-    Path p = new Path(root, f.getName());
-    FSDataOutputStream out = fs.create(p);
-    byte[] toWrite = new byte[f.getSize()];
-    new Random(f.getSeed()).nextBytes(toWrite);
-    out.write(toWrite);
-    out.close();
-    FileSystem.LOG.info("created: " + p + ", size=" + f.getSize());
-    return f;
-  }
-
-  private static MyFile createFile(Path root, FileSystem fs) throws IOException {
-    return createFile(root, fs, -1);
-  }
-
-  private static boolean checkFiles(FileSystem fs, String topdir, MyFile[] files)
-      throws IOException {
-    return checkFiles(fs, topdir, files, false);
-  }
-
-  private static boolean checkFiles(FileSystem fs, String topdir,
-      MyFile[] files, boolean existingOnly) throws IOException {
-    Path root = new Path(topdir);
-
-    for (int idx = 0; idx < files.length; idx++) {
-      Path fPath = new Path(root, files[idx].getName());
-      try {
-        fs.getFileStatus(fPath);
-        FSDataInputStream in = fs.open(fPath);
-        byte[] toRead = new byte[files[idx].getSize()];
-        byte[] toCompare = new byte[files[idx].getSize()];
-        Random rb = new Random(files[idx].getSeed());
-        rb.nextBytes(toCompare);
-        assertEquals("Cannnot read file.", toRead.length, in.read(toRead));
-        in.close();
-        for (int i = 0; i < toRead.length; i++) {
-          if (toRead[i] != toCompare[i]) {
-            return false;
-          }
-        }
-        toRead = null;
-        toCompare = null;
-      } catch (FileNotFoundException fnfe) {
-        if (!existingOnly) {
-          throw fnfe;
-        }
-      }
-    }
-
-    return true;
-  }
-
-  /** delete directory and everything underneath it. */
-  private static void deldir(FileSystem fs, String topdir) throws IOException {
-    fs.delete(new Path(topdir), true);
-  }
-
-  /** verify hdfsproxy implements the hftp interface */
-  public void testHdfsProxyInterface() throws Exception {
-    MiniDFSCluster cluster = null;
-    HdfsProxy proxy = null;
-    try {
-      final UserGroupInformation CLIENT_UGI = UserGroupInformation.getCurrentUser();
-      final String testUser = CLIENT_UGI.getShortUserName();
-      final String testGroup = CLIENT_UGI.getGroupNames()[0];
-
-      final Configuration dfsConf = new HdfsConfiguration();
-      dfsConf.set("hadoop.proxyuser." + testUser +
-          ".groups", testGroup);
-      dfsConf.set("hadoop.proxyuser." + testGroup + ".hosts",
-          "localhost,127.0.0.1");
-      dfsConf.set("hadoop.proxyuser." + testUser +
-          ".hosts", "localhost,127.0.0.1");
-      cluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(2).build();
-      cluster.waitActive();
-
-      final FileSystem localfs = FileSystem.get(LOCAL_FS, dfsConf);
-      final FileSystem hdfs = cluster.getFileSystem();
-      final Configuration proxyConf = new HdfsConfiguration(false);
-      proxyConf.set("hdfsproxy.dfs.namenode.address", hdfs.getUri().getHost() + ":"
-          + hdfs.getUri().getPort());
-      proxyConf.set("hdfsproxy.https.address", "localhost:0");
-      final String namenode = hdfs.getUri().toString();
-      if (namenode.startsWith("hdfs://")) {
-        MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR + "/srcdat");
-        hdfs.copyFromLocalFile(
-            new Path("file:///" + TEST_ROOT_DIR + "/srcdat"),
-            new Path(namenode + "/destdat"));
-        assertTrue("Source and destination directories do not match.",
-            checkFiles(hdfs, "/destdat", files));
-
-        proxyConf.set("proxy.http.test.listener.addr", "localhost:0");
-        proxy = new HdfsProxy(proxyConf);
-        proxy.start();
-        InetSocketAddress proxyAddr = NetUtils.createSocketAddr("localhost:0");
-        final String realProxyAddr = proxyAddr.getHostName() + ":"
-            + proxy.getPort();
-        final Path proxyUrl = new Path("hftp://" + realProxyAddr);
-        final FileSystem hftp = proxyUrl.getFileSystem(dfsConf);
-        FileUtil.copy(hftp, new Path(proxyUrl, "/destdat"),
-                      hdfs, new Path(namenode + "/copied1"),
-                      false, true, proxyConf);
-        
-        assertTrue("Source and copied directories do not match.", checkFiles(
-            hdfs, "/copied1", files));
-
-        FileUtil.copy(hftp, new Path(proxyUrl, "/destdat"),
-                      localfs, new Path(TEST_ROOT_DIR + "/copied2"),
-                      false, true, proxyConf);
-        assertTrue("Source and copied directories do not match.", checkFiles(
-            localfs, TEST_ROOT_DIR + "/copied2", files));
-
-        deldir(hdfs, "/destdat");
-        deldir(hdfs, "/logs");
-        deldir(hdfs, "/copied1");
-        deldir(localfs, TEST_ROOT_DIR + "/srcdat");
-        deldir(localfs, TEST_ROOT_DIR + "/copied2");
-      }
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-      if (proxy != null) {
-        proxy.stop();
-      }
-    }
-  }
-}
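
One caveat about checkFiles() above: a single InputStream.read(buf) call may legally return fewer bytes than the buffer holds, so the assertEquals on the read length can in principle under-read. A partial-read-safe sketch of the same comparison (the helper name matchesSeed is illustrative only, not part of the removed tree):

    import java.io.DataInputStream;
    import java.io.IOException;
    import java.util.Arrays;
    import java.util.Random;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ReadFullySketch {
      // Read the whole file and compare it against the bytes produced by the seed.
      static boolean matchesSeed(FileSystem fs, Path file, int size, long seed)
          throws IOException {
        byte[] expected = new byte[size];
        new Random(seed).nextBytes(expected);
        byte[] actual = new byte[size];
        try (DataInputStream in = new DataInputStream(fs.open(file))) {
          in.readFully(actual); // loops until the buffer is full, or throws EOFException
        }
        return Arrays.equals(expected, actual);
      }
    }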
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestLdapIpDirFilter.java b/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestLdapIpDirFilter.java
deleted file mode 100644
index 419ede2..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestLdapIpDirFilter.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-
-import javax.naming.NamingException;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-
-import org.apache.cactus.FilterTestCase;
-import org.apache.cactus.WebRequest;
-import org.apache.cactus.WebResponse;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-public class TestLdapIpDirFilter extends FilterTestCase {
-
-  public static final Log LOG = LogFactory.getLog(TestLdapIpDirFilter.class);
-
-  private class DummyFilterChain implements FilterChain {
-    public void doFilter(ServletRequest theRequest, ServletResponse theResponse)
-        throws IOException, ServletException {
-      PrintWriter writer = theResponse.getWriter();
-
-      writer.print("<p>some content</p>");
-      writer.close();
-    }
-
-    public void init(FilterConfig theConfig) {
-    }
-
-    public void destroy() {
-    }
-  }
-
-  public void testIpRestriction() throws ServletException, IOException,
-      NamingException {
-    LdapIpDirFilter filter = new LdapIpDirFilter();
-    String baseName = "ou=proxyroles,dc=mycompany,dc=com";
-    DummyLdapContext dlc = new DummyLdapContext();
-    filter.initialize(baseName, dlc);
-    request.setRemoteIPAddress("127.0.0.2");
-    request.removeAttribute("org.apache.hadoop.hdfsproxy.authorized.userID");
-    FilterChain mockFilterChain = new DummyFilterChain();
-    filter.doFilter(request, response, mockFilterChain);
-    assertNull(request
-        .getAttribute("org.apache.hadoop.hdfsproxy.authorized.userID"));
-  }
-
-  public void endIpRestriction(WebResponse theResponse) {
-    assertEquals(theResponse.getStatusCode(), 403);
-    assertTrue("Text missing 'IP not authorized to access' : : ["
-        + theResponse.getText() + "]", theResponse.getText().indexOf(
-        "not authorized to access") > 0);
-  }
-
-  public void beginDoFilter(WebRequest theRequest) {
-    theRequest.setURL("proxy-test:0", null, "/streamFile/testdir",
-      null, null);
-  }
-
-  public void testDoFilter() throws ServletException, IOException,
-      NamingException {
-    LdapIpDirFilter filter = new LdapIpDirFilter();
-    String baseName = "ou=proxyroles,dc=mycompany,dc=com";
-    DummyLdapContext dlc = new DummyLdapContext();
-    filter.initialize(baseName, dlc);
-    request.setRemoteIPAddress("127.0.0.1");
-
-    ServletContext context = config.getServletContext();
-    context.removeAttribute("name.node.address");
-    context.removeAttribute("name.conf");
-    assertNull(context.getAttribute("name.node.address"));
-    assertNull(context.getAttribute("name.conf"));
-    filter.init(config);
-    assertNotNull(context.getAttribute("name.node.address"));
-    assertNotNull(context.getAttribute("name.conf"));
-
-    request.removeAttribute("org.apache.hadoop.hdfsproxy.authorized.userID");
-    FilterChain mockFilterChain = new DummyFilterChain();
-    filter.doFilter(request, response, mockFilterChain);
-    assertEquals(request
-        .getAttribute("org.apache.hadoop.hdfsproxy.authorized.userID"),
-        "testuser");
-
-  }
-
-  public void endDoFilter(WebResponse theResponse) {
-    assertEquals("<p>some content</p>", theResponse.getText());
-  }
-
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyFilter.java b/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyFilter.java
deleted file mode 100644
index 61c0412..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyFilter.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import java.io.IOException;
-import java.io.PrintWriter;
-
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.ServletContext;
-
-import org.apache.cactus.FilterTestCase;
-import org.apache.cactus.WebRequest;
-import org.apache.cactus.WebResponse;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-
-public class TestProxyFilter extends FilterTestCase {
-  
-  public static final Log LOG = LogFactory.getLog(TestProxyFilter.class);
-  
-  private static String TEST_CLIENT_SSL_CERT = System.getProperty("javax.net.ssl.clientCert", 
-  "./src/test/resources/ssl-keys/test.crt");
-  
-  private class DummyFilterChain implements FilterChain {
-    public void doFilter(ServletRequest theRequest, ServletResponse theResponse) 
-      throws IOException, ServletException  {
-      PrintWriter writer = theResponse.getWriter();
-  
-      writer.print("<p>some content</p>");
-      writer.close();
-    }
-  
-    public void init(FilterConfig theConfig) {
-    }
-  
-    public void destroy() {
-    }
-  }
-   
-  public void beginDoFilterHttp(WebRequest theRequest) {
-    theRequest.addParameter("ugi", "nobody,test");
-  }  
-  
-  public void testDoFilterHttp() throws ServletException, IOException  {    
-    ProxyFilter filter = new ProxyFilter();
-    
-    ServletContext context = config.getServletContext();
-    context.removeAttribute("name.node.address");
-    context.removeAttribute("name.conf");
-    assertNull(context.getAttribute("name.node.address"));
-    assertNull(context.getAttribute("name.conf"));
-    
-    filter.init(config);
-    
-    assertNotNull(context.getAttribute("name.node.address"));
-    assertNotNull(context.getAttribute("name.conf"));
-    
-    request.removeAttribute("authorized.ugi");
-    assertNull(request.getAttribute("authorized.ugi"));
-        
-    FilterChain mockFilterChain = new DummyFilterChain();
-    filter.doFilter(request, response, mockFilterChain);    
-    assertEquals(request.getAttribute("authorized.ugi").toString(), "nobody,test");
-    
-  }
-
-  public void endDoFilterHttp(WebResponse theResponse)  {
-    assertEquals("<p>some content</p>", theResponse.getText());    
-  }
-  
-  public void beginDoFilterHttps(WebRequest theRequest) throws Exception{
-    theRequest.addParameter("UnitTest", "true");
-    theRequest.addParameter("SslPath", TEST_CLIENT_SSL_CERT);
-    theRequest.addParameter("ugi", "nobody,test");    
-    theRequest.addParameter("TestSevletPathInfo", "/streamFile");
-    theRequest.addParameter("filename", "/user");
-  }  
-  
-  public void testDoFilterHttps() throws Exception  {    
-    ProxyFilter filter = new ProxyFilter();
-    
-    request.removeAttribute("authorized.ugi");
-    assertNull(request.getAttribute("authorized.ugi"));        
-    
-    FilterChain mockFilterChain = new DummyFilterChain();
-    filter.init(config);
-    filter.doFilter(request, response, mockFilterChain);
-    
-    LOG.info("Finish setting up X509Certificate");  
-    assertEquals(request.getAttribute("authorized.ugi").toString().substring(0, 6), "nobody");
-    
-  }
-
-  public void endDoFilterHttps(WebResponse theResponse)  {
-    assertEquals("<p>some content</p>", theResponse.getText());    
-  }
-  
-    
-}
-
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyForwardServlet.java b/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyForwardServlet.java
deleted file mode 100644
index 466f63d..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyForwardServlet.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import org.apache.cactus.ServletTestCase;
-import org.apache.cactus.WebRequest;
-import org.apache.cactus.WebResponse;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import java.io.IOException;
-import javax.servlet.ServletException;
-
-/** Unit tests for ProxyForwardServlet */
-public class TestProxyForwardServlet extends ServletTestCase {
-  public static final Log LOG = LogFactory.getLog(TestProxyForwardServlet.class);
-  
- 
-  public void beginDoGet(WebRequest theRequest) {
-    theRequest.setURL("proxy-test:0", null, "/simple", null, null);
-  }
-  
-  public void testDoGet() throws IOException, ServletException {
-    ProxyForwardServlet servlet = new ProxyForwardServlet();
-    
-    servlet.init(config);
-    servlet.doGet(request, response);
-  }
-  
-  public void endDoGet(WebResponse theResponse)
-    throws IOException {
-    String expected = "<html><head/><body>A GET request</body></html>";
-    String result = theResponse.getText();
-
-    assertEquals(expected, result);
-  }
-  
-  
-  public void testForwardRequest() throws Exception  {
-    ProxyForwardServlet servlet = new ProxyForwardServlet();
-
-    servlet.forwardRequest(request, response, config.getServletContext(), "/simple");
-  }
-  
-  public void endForwardRequest(WebResponse theResponse) throws IOException  {
-    String expected = "<html><head/><body>A GET request</body></html>";
-    String result = theResponse.getText();
-    
-    assertEquals(expected, result);
-    
-  } 
- 
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java b/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java
deleted file mode 100644
index 444beac..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestProxyUtil.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfsproxy;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-
-/** Unit tests for ProxyUtil */
-public class TestProxyUtil extends TestCase {
-  
-  private static String TEST_PROXY_CONF_DIR = System.getProperty("test.proxy.conf.dir", "./conf");
-  private static String TEST_PROXY_HTTPS_PORT = System.getProperty("test.proxy.https.port", "8443");
-
-  public void testSendCommand() throws Exception {
-      
-    Configuration conf = new HdfsConfiguration(false);  
-    conf.addResource("ssl-client.xml");
-    conf.addResource("hdfsproxy-default.xml");
-    String address = "localhost:" + TEST_PROXY_HTTPS_PORT;
-    conf.set("hdfsproxy.https.address", address);
-    String hostFname = TEST_PROXY_CONF_DIR + "/hdfsproxy-hosts";
-    conf.set("hdfsproxy.hosts", hostFname);    
-    
-    assertTrue(ProxyUtil.sendCommand(conf, "/test/reloadPermFiles"));
-    
-    conf.set("hdfsproxy.https.address", "localhost:7777");
-    assertFalse(ProxyUtil.sendCommand(conf, "/test/reloadPermFiles"));
-    assertFalse(ProxyUtil.sendCommand(conf, "/test/reloadPermFiles"));
-  }
- 
-}
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/cactus-web.xml b/hdfs/src/contrib/hdfsproxy/src/test/resources/cactus-web.xml
deleted file mode 100644
index ddbab81..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/cactus-web.xml
+++ /dev/null
@@ -1,77 +0,0 @@
-<?xml version="1.0" encoding="ISO-8859-1"?>
-<!DOCTYPE web-app
-    PUBLIC "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"
-    "http://java.sun.com/dtd/web-app_2_3.dtd">
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<web-app>
-
-    <context-param>
-      <param-name>param</param-name>
-      <param-value>value used for testing</param-value>
-    </context-param>
-    
-    <servlet>
-        <servlet-name>ServletRedirector</servlet-name>
-        <servlet-class>org.apache.cactus.server.ServletTestRedirector</servlet-class>
-        <init-param>
-          <param-name>param1</param-name>
-          <param-value>value1 used for testing</param-value>
-        </init-param>
-    </servlet>
-    
-    <servlet>
-        <servlet-name>ServletRedirector_TestOverride</servlet-name>
-        <servlet-class>org.apache.cactus.server.ServletTestRedirector</servlet-class>
-        <init-param>
-          <param-name>param2</param-name>
-          <param-value>value2 used for testing</param-value>
-        </init-param>
-    </servlet>
-
-    <servlet>
-        <servlet-name>TestJsp</servlet-name>
-        <jsp-file>/test/test.jsp</jsp-file>
-    </servlet>
-
-    <servlet>
-        <servlet-name>JspRedirector</servlet-name>
-        <jsp-file>/jspRedirector.jsp</jsp-file>
-        <init-param>
-          <param-name>param1</param-name>
-          <param-value>value1 used for testing</param-value>
-        </init-param>
-    </servlet>
-
-    <servlet>
-      <servlet-name>Simple</servlet-name>
-      <description> A Simple Servlet </description>
-      <servlet-class>org.apache.hadoop.hdfsproxy.SimpleServlet</servlet-class>
-    </servlet>
-    
-    <servlet-mapping>
-        <servlet-name>ServletRedirector_TestOverride</servlet-name>
-        <url-pattern>/ServletRedirectorOverride</url-pattern>
-    </servlet-mapping>
-    
-    <servlet-mapping>
-        <servlet-name>Simple</servlet-name>
-        <url-pattern>/simple/*</url-pattern>
-    </servlet-mapping>
-
-</web-app>
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/hdfsproxy-default.xml b/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/hdfsproxy-default.xml
deleted file mode 100644
index b43e74f..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/hdfsproxy-default.xml
+++ /dev/null
@@ -1,104 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- Put hdfsproxy specific properties in this file. -->
-
-<configuration>
-
-<property>
-  <name>hdfsproxy.https.address</name>
-  <value>0.0.0.0:8443</value>
-  <description>the SSL port that hdfsproxy listens on
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.hosts</name>
-  <value>hdfsproxy-hosts</value>
-  <description>location of hdfsproxy-hosts file
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.dfs.namenode.address</name>
-  <value>localhost:54321</value>
-  <description>namenode address of the HDFS cluster being proxied
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.https.server.keystore.resource</name>
-  <value>ssl-server.xml</value>
-  <description>location of the resource from which ssl server keystore
-  information will be extracted
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.user.permissions.file.location</name>
-  <value>user-permissions.xml</value>
-  <description>location of the user permissions file
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.user.certs.file.location</name>
-  <value>user-certs.xml</value>
-  <description>location of the user certs file
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.ugi.cache.ugi.lifetime</name>
-  <value>15</value>
-  <description> The lifetime (in minutes) of a cached ugi
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.ldap.initial.context.factory</name>
-  <value>com.sun.jndi.ldap.LdapCtxFactory</value>
-  <description> ldap initial context factory
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.ldap.provider.url</name>
-  <value>ldap://ldapserver:389</value>
-  <description> ldap server address
-  </description>
-</property>
-
-<property>
-  <name>hdfsproxy.ldap.role.base</name>
-  <value>ou=proxyroles,dc=mycompany,dc=com</value>
-  <description> ldap role base
-  </description>
-</property>
-
-<property>
-  <name>fs.default.name</name>
-  <!-- cluster variant -->
-  <value>hdfs://localhost:8020</value>
-  <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-  <final>true</final>
-</property>
-
-</configuration>
-
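
The proxy reads these hdfsproxy.* keys as ordinary Hadoop Configuration resources, which is the same loading pattern TestProxyUtil above uses. A minimal sketch of that pattern (assumed usage; the default shown mirrors the value in this file):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class LoadProxyConfSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration(false); // skip the usual hdfs defaults
        conf.addResource("ssl-client.xml");                // resources must be on the classpath
        conf.addResource("hdfsproxy-default.xml");
        String sslAddr = conf.get("hdfsproxy.https.address", "0.0.0.0:8443");
        System.out.println("hdfsproxy listens on " + sslAddr);
      }
    }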
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/hdfsproxy-hosts b/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/hdfsproxy-hosts
deleted file mode 100644
index 2fbb50c..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/hdfsproxy-hosts
+++ /dev/null
@@ -1 +0,0 @@
-localhost
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/hdfsproxy-site.xml b/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/hdfsproxy-site.xml
deleted file mode 100644
index 72692e0..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/hdfsproxy-site.xml
+++ /dev/null
@@ -1,15 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put hdfsproxy specific properties in this file. -->
-
-<configuration>
-<property>
-  <name>proxy-test</name>
-  <value>/test</value>
-  <description>one hostname corresponds to one web application archive 
-  </description>
-</property>
-
-</configuration>
-
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/log4j.properties b/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/log4j.properties
deleted file mode 100644
index 54beb3d..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/log4j.properties
+++ /dev/null
@@ -1,76 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hdfsproxy.root.logger=DEBUG,console
-hdfsproxy.log.dir=.
-hdfsproxy.log.file=hdfsproxy.log
-
-# Define the root logger to the system property "hdfsproxy.root.logger".
-log4j.rootLogger=${hdfsproxy.root.logger}
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hdfsproxy.log.dir}/${hdfsproxy.log.file}
-
-# Roll over at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hdfsproxy.log.dir}/${hdfsproxy.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-# Custom Logging levels
-
-log4j.logger.org.apache.hadoop.hdfsproxy.HttpsProxy=DEBUG
-log4j.logger.org.apache.hadoop.hdfsproxy.ProxyFilter=DEBUG
-log4j.logger.org.apache.hadoop.hdfsproxy.HdfsProxy=DEBUG
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/ssl-client.xml b/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/ssl-client.xml
deleted file mode 100644
index 22c9389..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/ssl-client.xml
+++ /dev/null
@@ -1,57 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-
-<property>
-  <name>ssl.client.truststore.location</name>
-  <value>${javax.net.ssl.keyStore}</value>
-</property>
-
-<property>
-  <name>ssl.client.truststore.password</name>
-  <value>changeme</value>
-</property>
-
-<property>
-  <name>ssl.client.truststore.type</name>
-  <value>jks</value>
-</property>
-
-<property>
-  <name>ssl.client.keystore.location</name>
-  <value>${javax.net.ssl.keyStore}</value>
-</property>
-
-<property>
-  <name>ssl.client.keystore.password</name>
-  <value>changeme</value>
-</property>
-
-<property>
-  <name>ssl.client.keystore.type</name>
-  <value>jks</value>
-</property>
-
-<property>
-  <name>ssl.client.keystore.keypassword</name>
-  <value>changeme</value>
-</property>
-
-</configuration>
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/ssl-server.xml b/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/ssl-server.xml
deleted file mode 100644
index bcc420b..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/ssl-server.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-
-<property>
-  <name>ssl.server.truststore.location</name>
-  <value>${javax.net.ssl.keyStore.proxy}</value>
-</property>
-
-<property>
-  <name>ssl.server.truststore.password</name>
-  <value>changeme</value>
-</property>
-
-<property>
-  <name>ssl.server.keystore.location</name>
-  <value>${javax.net.ssl.keyStore.proxy}</value>
-</property>
-
-<property>
-  <name>ssl.server.keystore.password</name>
-  <value>changeme</value>
-</property>
-
-<property>
-  <name>ssl.server.keystore.keypassword</name>
-  <value>changeme</value>
-</property>
-
-</configuration>
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/user-certs.xml b/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/user-certs.xml
deleted file mode 100644
index bce3a5a..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/user-certs.xml
+++ /dev/null
@@ -1,85 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- 
-
-This file defines the mappings from username to a comma-separated list
-of certificate serial numbers that the user is allowed to use. One mapping
-per user. Wildcard characters, such as "*" and "?", are not recognized.
-Any leading or trailing whitespace is stripped/ignored.
-
--->
-
-<configuration>
-
-<property>
-  <name> kan </name>
-  <value> ,6  ,,  4 , 9a2cf0be9ddf8280
-
-
-
-         </value>
-</property>
-
-<property>
-  <name> k. zhang </name>
-  <value> ,  ,,  2 , 9a2cf0be9ddf8280
-
-
-
-         </value>
-</property>
-
-<property>
-  <name> zhiyong1 </name>
-  <value> ,5  ,,  3 , 9a2cf0be9ddf8280
-
-
-
-         </value>
-</property>
-
-<property>
-  <name> nobody </name>
-  <value> ,6  ,,  3 , 9a2cf0be9ddf8280
-
-
-
-         </value>
-</property>
-
-<property>
-  <name> root </name>
-  <value> ,7  ,,  3 , 9a2cf0be9ddf8280
-
-
-
-         </value>
-</property>
-
-<property>
-  <name> Admin </name>
-  <value>, 6,  ,,  3 , 9a2cf0be9ddf8280
-
-
-
-         </value>
-</property>
-
-</configuration>
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/user-permissions.xml b/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/user-permissions.xml
deleted file mode 100644
index d24d8cc..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/proxy-config/user-permissions.xml
+++ /dev/null
@@ -1,78 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<!-- 
-
-This file defines the mappings from user name to a comma-separated list
-of directories/files that the user is allowed to access. One mapping
-per user. Wildcard characters, such as "*" and "?", are not recognized.
-For example, to match the "/output" directory, one can use "/output" or
-"/output/", but not "/output/*". Note that any leading or trailing
-whitespace is stripped/ignored for the name field.
-
--->
-
-<configuration>
-
-<property>
-  <name> kan </name>
-  <value> ,   
-
-
-
-        /input, /user, /data </value>
-</property>
-
-<property>
-  <name> k. zhang </name>
-  <value> ,   
-
-
-
-        /input, /user, /data </value>
-</property>
-
-<property>
-  <name> zhiyong1 </name>
-  <value> ,
-
-
-
-        /input, /user, /data </value>
-</property>
-
-<property>
-  <name> nobody </name>
-  <value> ,
-
-
-
-        /input, /user, /data </value>
-</property>
-
-<property>
-  <name> root </name>
-  <value> ,
-
-
-
-        /input, /user, /data </value>
-</property>
-
-
-</configuration>
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/ssl-keys/client.keystore b/hdfs/src/contrib/hdfsproxy/src/test/resources/ssl-keys/client.keystore
deleted file mode 100644
index c8428c2..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/ssl-keys/client.keystore
+++ /dev/null
Binary files differ
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/ssl-keys/proxy.keystore b/hdfs/src/contrib/hdfsproxy/src/test/resources/ssl-keys/proxy.keystore
deleted file mode 100644
index e3ae564..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/ssl-keys/proxy.keystore
+++ /dev/null
Binary files differ
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/ssl-keys/test.crt b/hdfs/src/contrib/hdfsproxy/src/test/resources/ssl-keys/test.crt
deleted file mode 100644
index b935e0f..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/ssl-keys/test.crt
+++ /dev/null
@@ -1,21 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDbjCCAtegAwIBAgIBBjANBgkqhkiG9w0BAQQFADCBijELMAkGA1UEBhMCVVMx
-EzARBgNVBAgTCkNhbGlmb3JuaWExEDAOBgNVBAcTB0J1cmJhbmsxDjAMBgNVBAoT
-BVlhaG9vMQ0wCwYDVQQLEwRHcmlkMQ4wDAYDVQQDEwVDbG91ZDElMCMGCSqGSIb3
-DQEJARYWemhpeW9uZzFAeWFob28taW5jLmNvbTAeFw0wOTAyMTExNzMxMTlaFw0y
-ODEwMjkxNzMxMTlaMFUxEDAOBgNVBAYTB1Vua25vd24xEDAOBgNVBAgTB1Vua25v
-d24xDzANBgNVBAoTBmhhZG9vcDENMAsGA1UECxMEdGVzdDEPMA0GA1UEAxMGbm9i
-b2R5MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCQnFDrGQ+lxdmwvv+8G3gc
-Vsj501WhlaEHa0xPJReeVfcxCRrE60k2Cb3nfHHyT0nW1vHZ0207T8LuGJKaaKMy
-5yIRTI7WwEDLqwxzl109Vlu8iBOqzJXcjo5YRSqmdEx8UYlrd67tMUyw0J5u8IlU
-UKT/OMt7YT4R89a9INyzTwIDAQABo4IBFjCCARIwCQYDVR0TBAIwADAsBglghkgB
-hvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYE
-FGAUh//jxUxfBpcAEp0ZAemyPbnDMIG3BgNVHSMEga8wgayAFIsZF5xSEMuMwUwN
-H89Xv8EJdxCqoYGQpIGNMIGKMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZv
-cm5pYTEQMA4GA1UEBxMHQnVyYmFuazEOMAwGA1UEChMFWWFob28xDTALBgNVBAsT
-BEdyaWQxDjAMBgNVBAMTBUNsb3VkMSUwIwYJKoZIhvcNAQkBFhZ6aGl5b25nMUB5
-YWhvby1pbmMuY29tggEAMA0GCSqGSIb3DQEBBAUAA4GBAG28KbSm/tV/ft+8//eB
-E87Cdq61ndPqiLhEaHrF3hKSsLzfbKaH9dQDqjTm+D7WVaxcFOn2V7ZUTMSZDHh2
-k+O8Tt642TB9HLbUtwEjoug3jHkx/uydcr8yhRb8/+x5flpU9hfaf0AU4Pc46Q95
-uMUVgIDvO5ED9OaoarJE8UbT
------END CERTIFICATE-----
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/tomcat-config/server.xml b/hdfs/src/contrib/hdfsproxy/src/test/resources/tomcat-config/server.xml
deleted file mode 100644
index 0fb51a0..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/tomcat-config/server.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<Server port="@ADMIN.PORT@" shutdown="SHUTDOWN" debug="4">
-
-  <Service name="Catalina" debug="4">
-
-    <Connector port="@HTTP.PORT@"
-        maxThreads="150" minSpareThreads="25" maxSpareThreads="75"
-        enableLookups="false" redirectPort="8443" acceptCount="100"
-        connectionTimeout="20000" disableUploadTimeout="true"
-        scheme="http" secure="false"
-        debug="4"/>
-    <Connector port="@HTTPS.PORT@" protocol="HTTP/1.1" SSLEnabled="true"
-               maxThreads="150" scheme="https" secure="true" 
-               keystoreFile="${javax.net.ssl.keyStore.proxy}" keystorePass="changeme" keystoreType="JKS" clientAuth="true" sslProtocol="TLS" />
-
-    <Engine name="Catalina" defaultHost="localhost" 
-        debug="4">
-
-      <Realm className="org.apache.catalina.realm.MemoryRealm" />
-
-      <!-- Note: There seems to be a bug in Tomcat 5.x if the debug attribute 
-           is present. Ideally we would have written:
-               debug="4"
-           However, doing this results in a NullPointerException in
-           ExpandWar.java at line 145. -->
-      <Host name="localhost" appBase="webapps" unpackWARs="true"
-          autoDeploy="true" xmlValidation="false" xmlNamespaceAware="false">
-
-        <!-- Contexts to explicitely point to where the wars are located -->
-         <Context path="/test" docBase="${build.target}/test.war" debug="4">
- 
-</Context>
-
-        <Valve className="org.apache.catalina.valves.AccessLogValve"
-            directory="logs" prefix="localhost_access_log." suffix=".txt"
-            pattern="common" resolveHosts="false"/>
-          
-      </Host>
-
-    </Engine>
-
-  </Service>
-
-</Server>
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/tomcat-config/tomcat-users.xml b/hdfs/src/contrib/hdfsproxy/src/test/resources/tomcat-config/tomcat-users.xml
deleted file mode 100644
index 67f6939..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/tomcat-config/tomcat-users.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<tomcat-users>
-   <user name="admin" password="" roles="manager"/>
-</tomcat-users>
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/tomcat-config/web.xml b/hdfs/src/contrib/hdfsproxy/src/test/resources/tomcat-config/web.xml
deleted file mode 100644
index ff39e30..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/tomcat-config/web.xml
+++ /dev/null
@@ -1,964 +0,0 @@
-<?xml version="1.0" encoding="ISO-8859-1"?>
-<!DOCTYPE web-app
-     PUBLIC "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"
-    "http://java.sun.com/dtd/web-app_2_3.dtd">
-<web-app>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-  <!-- ======================== Introduction ============================== -->
-  <!-- This document defines default values for *all* web applications      -->
-  <!-- loaded into this instance of Tomcat.  As each application is         -->
-  <!-- deployed, this file is processed, followed by the                    -->
-  <!-- "/WEB-INF/web.xml" deployment descriptor from your own               -->
-  <!-- applications.                                                        -->
-  <!--                                                                      -->
-  <!-- WARNING:  Do not configure application-specific resources here!      -->
-  <!-- They should go in the "/WEB-INF/web.xml" file in your application.   -->
-
-
-  <!-- ================== Built In Servlet Definitions ==================== -->
-
-
-  <!-- The default servlet for all web applications, that serves static     -->
-  <!-- resources.  It processes all requests that are not mapped to other   -->
-  <!-- servlets with servlet mappings (defined either here or in your own   -->
-  <!-- web.xml file.  This servlet supports the following initialization    -->
-  <!-- parameters (default values are in square brackets):                  -->
-  <!--                                                                      -->
-  <!--   debug               Debugging detail level for messages logged     -->
-  <!--                       by this servlet.  [0]                          -->
-  <!--                                                                      -->
-  <!--   input               Input buffer size (in bytes) when reading      -->
-  <!--                       resources to be served.  [2048]                -->
-  <!--                                                                      -->
-  <!--   listings            Should directory listings be produced if there -->
-  <!--                       is no welcome file in this directory?  [true]  -->
-  <!--                                                                      -->
-  <!--   output              Output buffer size (in bytes) when writing     -->
-  <!--                       resources to be served.  [2048]                -->
-  <!--                                                                      -->
-  <!--   readonly            Is this context "read only", so HTTP           -->
-  <!--                       commands like PUT and DELETE are               -->
-  <!--                       rejected?  [true]                              -->
-  <!--                                                                      -->
-  <!--   readmeFile          File name to display with the directory        -->
-  <!--                       contents. [null]                               -->
-  <!--                                                                      -->
-  <!--  For directory listing customization. Checks localXsltFile, then     -->
-  <!--  globalXsltFile, then defaults to original behavior.                 -->
-  <!--                                                                      -->
-  <!--   localXsltFile       Make directory listings an XML doc and         -->
-  <!--                       pass the result to this style sheet residing   -->
-  <!--                       in that directory. This overrides              -->
-  <!--                        globalXsltFile[null]                          -->
-  <!--                                                                      -->
-  <!--   globalXsltFile      Site wide configuration version of             -->
-  <!--                       localXsltFile This argument is expected        -->
-  <!--                       to be a physical file. [null]                  -->
-  <!--                                                                      -->
-  <!--                                                                      -->
-
-    <servlet>
-        <servlet-name>default</servlet-name>
-        <servlet-class>
-          org.apache.catalina.servlets.DefaultServlet
-        </servlet-class>
-        <init-param>
-            <param-name>debug</param-name>
-            <param-value>0</param-value>
-        </init-param>
-        <init-param>
-            <param-name>listings</param-name>
-            <param-value>false</param-value>
-        </init-param>
-        <load-on-startup>1</load-on-startup>
-    </servlet>
-
-
-  <!-- The "invoker" servlet, which executes anonymous servlet classes      -->
-  <!-- that have not been defined in a web.xml file.  Traditionally, this   -->
-  <!-- servlet is mapped to the URL pattern "/servlet/*", but you can map   -->
-  <!-- it to other patterns as well.  The extra path info portion of such a -->
-  <!-- request must be the fully qualified class name of a Java class that  -->
-  <!-- implements Servlet (or extends HttpServlet), or the servlet name     -->
-  <!-- of an existing servlet definition.     This servlet supports the     -->
-  <!-- following initialization parameters (default values are in square    -->
-  <!-- brackets):                                                           -->
-  <!--                                                                      -->
-  <!--   debug               Debugging detail level for messages logged     -->
-  <!--                       by this servlet.  [0]                          -->
-
-<!--
-    <servlet>
-        <servlet-name>invoker</servlet-name>
-        <servlet-class>
-          org.apache.catalina.servlets.InvokerServlet
-        </servlet-class>
-        <init-param>
-            <param-name>debug</param-name>
-            <param-value>0</param-value>
-        </init-param>
-        <load-on-startup>2</load-on-startup>
-    </servlet>
--->
-
-
-  <!-- The JSP page compiler and execution servlet, which is the mechanism  -->
-  <!-- used by Tomcat to support JSP pages.  Traditionally, this servlet    -->
-  <!-- is mapped to the URL pattern "*.jsp".  This servlet supports the     -->
-  <!-- following initialization parameters (default values are in square    -->
-  <!-- brackets):                                                           -->
-  <!--                                                                      -->
-  <!--   checkInterval       If development is false and reloading is true, -->
-  <!--                       background compiles are enabled. checkInterval -->
-  <!--                       is the time in seconds between checks to see   -->
-  <!--                       if a JSP page needs to be recompiled. [300]    -->
-  <!--                                                                      -->
-  <!--   compiler            Which compiler Ant should use to compile JSP   -->
-  <!--                       pages.  See the Ant documentation for more     -->
-  <!--                       information. [javac]                           -->
-  <!--                                                                      -->
-  <!--   classdebuginfo      Should the class file be compiled with         -->
-  <!--                       debugging information?  [true]                 -->
-  <!--                                                                      -->
-  <!--   classpath           What class path should I use while compiling   -->
-  <!--                       generated servlets?  [Created dynamically      -->
-  <!--                       based on the current web application]          -->
-  <!--                                                                      -->
-  <!--   development         Is Jasper used in development mode (will check -->
-  <!--                       for JSP modification on every access)?  [true] -->
-  <!--                                                                      -->
-  <!--   enablePooling       Determines whether tag handler pooling is      -->
-  <!--                       enabled  [true]                                -->
-  <!--                                                                      -->
-  <!--   fork                Tell Ant to fork compiles of JSP pages so that -->
-  <!--                       a separate JVM is used for JSP page compiles   -->
-  <!--                       from the one Tomcat is running in. [true]      -->
-  <!--                                                                      -->
-  <!--   ieClassId           The class-id value to be sent to Internet      -->
-  <!--                       Explorer when using <jsp:plugin> tags.         -->
-  <!--                       [clsid:8AD9C840-044E-11D1-B3E9-00805F499D93]   -->
-  <!--                                                                      -->
-  <!--   javaEncoding        Java file encoding to use for generating java  -->
-  <!--                       source files. [UTF8]                           -->
-  <!--                                                                      -->
-  <!--   keepgenerated       Should we keep the generated Java source code  -->
-  <!--                       for each page instead of deleting it? [true]   -->
-  <!--                                                                      -->
-  <!--   mappedfile          Should we generate static content with one     -->
-  <!--                       print statement per input line, to ease        -->
-  <!--                       debugging?  [true]                            -->
-  <!--                                                                      -->
-  <!--   trimSpaces          Should white spaces in template text between   -->
-  <!--                       actions or directives be trimmed?  [false]     -->
-  <!--                                                                      -->
-  <!--   reloading           Should Jasper check for modified JSPs?  [true] -->
-  <!--                                                                      -->
-  <!--   suppressSmap        Should the generation of SMAP info for JSR45   -->
-  <!--                       debugging be suppressed?  [false]              -->
-  <!--                                                                      -->
-  <!--   dumpSmap            Should the SMAP info for JSR45 debugging be    -->
-  <!--                       dumped to a file? [false]                      -->
-  <!--                       False if suppressSmap is true                  -->
-  <!--                                                                      -->
-  <!--   genStrAsCharArray   Should text strings be generated as char       -->
-  <!--                       arrays, to improve performance in some cases?  -->
-  <!--                       [false]                                        -->
-  <!--                                                                      -->
-  <!--   errorOnUseBeanInvalidClassAttribute                                -->
-  <!--                       Should Jasper issue an error when the value of -->
-  <!--                       the class attribute in an useBean action is    -->
-  <!--                       not a valid bean class?  [true]                -->
-  <!--                                                                      -->
-  <!--   scratchdir          What scratch directory should we use when      -->
-  <!--                       compiling JSP pages?  [default work directory  -->
-  <!--                       for the current web application]               -->
-  <!--                                                                      -->
-  <!--   xpoweredBy          Determines whether X-Powered-By response       -->
-  <!--                       header is added by generated servlet  [false]  -->
-  <!--                                                                      -->
-  <!-- If you wish to use Jikes to compile JSP pages:                       -->
-  <!--   Set the init parameter "compiler" to "jikes".  Define              -->
-  <!--   the property "-Dbuild.compiler.emacs=true" when starting Tomcat    -->
-  <!--   by adding the above to your CATALINA_OPTS environment variable.    -->
-  <!--   If you get an error reporting that jikes can't use UTF8 encoding,  -->
-  <!--   try setting the init parameter "javaEncoding" to "ISO-8859-1".     -->
-
-    <servlet>
-        <servlet-name>jsp</servlet-name>
-        <servlet-class>org.apache.jasper.servlet.JspServlet</servlet-class>
-        <init-param>
-            <param-name>fork</param-name>
-            <param-value>false</param-value>
-        </init-param>
-        <init-param>
-            <param-name>xpoweredBy</param-name>
-            <param-value>false</param-value>
-        </init-param>
-        <load-on-startup>3</load-on-startup>
-    </servlet>
-
-
-  <!-- Server Side Includes processing servlet, which processes SSI         -->
-  <!-- directives in HTML pages consistent with similar support in web      -->
-  <!-- servers like Apache.  Traditionally, this servlet is mapped to the   -->
-  <!-- URL pattern "*.shtml".  This servlet supports the following          -->
-  <!-- initialization parameters (default values are in square brackets):   -->
-  <!--                                                                      -->
-  <!--   buffered            Should output from this servlet be buffered?   -->
-  <!--                       (0=false, 1=true)  [0]                         -->
-  <!--                                                                      -->
-  <!--   debug               Debugging detail level for messages logged     -->
-  <!--                       by this servlet.  [0]                          -->
-  <!--                                                                      -->
-  <!--   expires             The number of seconds before a page with SSI   -->
-  <!--                       directives will expire.  [No default]          -->
-  <!--                                                                      -->
-  <!--   isVirtualWebappRelative                                            -->
-  <!--                       Should "virtual" paths be interpreted as       -->
-  <!--                       relative to the context root, instead of       -->
-  <!--                       the server root?  (0=false, 1=true) [0]        -->
-  <!--                                                                      -->
-  <!--                                                                      -->
-  <!-- IMPORTANT: To use the SSI servlet, you also need to rename the       -->
-  <!--            $CATALINA_HOME/server/lib/servlets-ssi.renametojar file   -->
-  <!--            to $CATALINA_HOME/server/lib/servlets-ssi.jar             -->
-
-<!--
-    <servlet>
-        <servlet-name>ssi</servlet-name>
-        <servlet-class>
-          org.apache.catalina.ssi.SSIServlet
-        </servlet-class>
-        <init-param>
-          <param-name>buffered</param-name>
-          <param-value>1</param-value>
-        </init-param>
-        <init-param>
-          <param-name>debug</param-name>
-          <param-value>0</param-value>
-        </init-param>
-        <init-param>
-          <param-name>expires</param-name>
-          <param-value>666</param-value>
-        </init-param>
-        <init-param>
-          <param-name>isVirtualWebappRelative</param-name>
-          <param-value>0</param-value>
-        </init-param>
-        <load-on-startup>4</load-on-startup>
-    </servlet>
--->
-
-
-  <!-- Common Gateway Includes (CGI) processing servlet, which supports     -->
-  <!-- execution of external applications that conform to the CGI spec      -->
-  <!-- requirements.  Typically, this servlet is mapped to the URL pattern  -->
-  <!-- "/cgi-bin/*", which means that any CGI applications that are         -->
-  <!-- executed must be present within the web application.  This servlet   -->
-  <!-- supports the following initialization parameters (default values     -->
-  <!-- are in square brackets):                                             -->
-  <!--                                                                      -->
-  <!--   cgiPathPrefix       The CGI search path will start at              -->
-  <!--                       webAppRootDir + File.separator + this prefix.  -->
-  <!--                       [WEB-INF/cgi]                                  -->
-  <!--                                                                      -->
-  <!--   clientInputTimeout  The time (in milliseconds) to wait for input   -->
-  <!--                       from the browser before assuming that there    -->
-  <!--                       is none.  [100]                                -->
-  <!--                                                                      -->
-  <!--   debug               Debugging detail level for messages logged     -->
-  <!--                       by this servlet.  [0]                          -->
-  <!--                                                                      -->
-  <!--   executable          Name of the executable used to run the script. -->
-  <!--                       [perl]                                         -->
-  <!--                                                                      -->
-  <!--   parameterEncoding   Name of parameter encoding to be used with CGI -->
-  <!--                       servlet.                                       -->
-  <!--                       [System.getProperty("file.encoding","UTF-8")]  -->
-  <!--                                                                      -->
-  <!-- IMPORTANT: To use the CGI servlet, you also need to rename the       -->
-  <!--            $CATALINA_HOME/server/lib/servlets-cgi.renametojar file   -->
-  <!--            to $CATALINA_HOME/server/lib/servlets-cgi.jar             -->
-
-<!--
-    <servlet>
-        <servlet-name>cgi</servlet-name>
-        <servlet-class>org.apache.catalina.servlets.CGIServlet</servlet-class>
-        <init-param>
-          <param-name>clientInputTimeout</param-name>
-          <param-value>100</param-value>
-        </init-param>
-        <init-param>
-          <param-name>debug</param-name>
-          <param-value>6</param-value>
-        </init-param>
-        <init-param>
-          <param-name>cgiPathPrefix</param-name>
-          <param-value>WEB-INF/cgi</param-value>
-        </init-param>
-         <load-on-startup>5</load-on-startup>
-    </servlet>
--->
-
-
-  <!-- ================ Built In Servlet Mappings ========================= -->
-
-
-  <!-- The servlet mappings for the built in servlets defined above.  Note  -->
-  <!-- that, by default, the CGI and SSI servlets are *not* mapped.  You    -->
-  <!-- must uncomment these mappings (or add them to your application's own -->
-  <!-- web.xml deployment descriptor) to enable these services              -->
-
-    <!-- The mapping for the default servlet -->
-    <servlet-mapping>
-        <servlet-name>default</servlet-name>
-        <url-pattern>/</url-pattern>
-    </servlet-mapping>
-
-    <!-- The mapping for the invoker servlet -->
-<!--
-    <servlet-mapping>
-        <servlet-name>invoker</servlet-name>
-        <url-pattern>/servlet/*</url-pattern>
-    </servlet-mapping>
--->
-
-    <!-- The mapping for the JSP servlet -->
-    <servlet-mapping>
-        <servlet-name>jsp</servlet-name>
-        <url-pattern>*.jsp</url-pattern>
-    </servlet-mapping>
-
-    <servlet-mapping>
-        <servlet-name>jsp</servlet-name>
-        <url-pattern>*.jspx</url-pattern>
-    </servlet-mapping>
-
-    <!-- The mapping for the SSI servlet -->
-<!--
-    <servlet-mapping>
-        <servlet-name>ssi</servlet-name>
-        <url-pattern>*.shtml</url-pattern>
-    </servlet-mapping>
--->
-
-    <!-- The mapping for the CGI Gateway servlet -->
-
-<!--
-    <servlet-mapping>
-        <servlet-name>cgi</servlet-name>
-        <url-pattern>/cgi-bin/*</url-pattern>
-    </servlet-mapping>
--->
-
-
-  <!-- ==================== Default Session Configuration ================= -->
-  <!-- You can set the default session timeout (in minutes) for all newly   -->
-  <!-- created sessions by modifying the value below.                       -->
-
-    <session-config>
-        <session-timeout>30</session-timeout>
-    </session-config>
-
-
-  <!-- ===================== Default MIME Type Mappings =================== -->
-  <!-- When serving static resources, Tomcat will automatically generate    -->
-  <!-- a "Content-Type" header based on the resource's filename extension,  -->
-  <!-- based on these mappings.  Additional mappings can be added here (to  -->
-  <!-- apply to all web applications), or in your own application's web.xml -->
-  <!-- deployment descriptor.                                               -->
-
-    <mime-mapping>
-        <extension>abs</extension>
-        <mime-type>audio/x-mpeg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>ai</extension>
-        <mime-type>application/postscript</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>aif</extension>
-        <mime-type>audio/x-aiff</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>aifc</extension>
-        <mime-type>audio/x-aiff</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>aiff</extension>
-        <mime-type>audio/x-aiff</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>aim</extension>
-        <mime-type>application/x-aim</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>art</extension>
-        <mime-type>image/x-jg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>asf</extension>
-        <mime-type>video/x-ms-asf</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>asx</extension>
-        <mime-type>video/x-ms-asf</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>au</extension>
-        <mime-type>audio/basic</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>avi</extension>
-        <mime-type>video/x-msvideo</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>avx</extension>
-        <mime-type>video/x-rad-screenplay</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>bcpio</extension>
-        <mime-type>application/x-bcpio</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>bin</extension>
-        <mime-type>application/octet-stream</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>bmp</extension>
-        <mime-type>image/bmp</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>body</extension>
-        <mime-type>text/html</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>cdf</extension>
-        <mime-type>application/x-cdf</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>cer</extension>
-        <mime-type>application/x-x509-ca-cert</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>class</extension>
-        <mime-type>application/java</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>cpio</extension>
-        <mime-type>application/x-cpio</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>csh</extension>
-        <mime-type>application/x-csh</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>css</extension>
-        <mime-type>text/css</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>dib</extension>
-        <mime-type>image/bmp</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>doc</extension>
-        <mime-type>application/msword</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>dtd</extension>
-        <mime-type>text/plain</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>dv</extension>
-        <mime-type>video/x-dv</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>dvi</extension>
-        <mime-type>application/x-dvi</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>eps</extension>
-        <mime-type>application/postscript</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>etx</extension>
-        <mime-type>text/x-setext</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>exe</extension>
-        <mime-type>application/octet-stream</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>gif</extension>
-        <mime-type>image/gif</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>gtar</extension>
-        <mime-type>application/x-gtar</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>gz</extension>
-        <mime-type>application/x-gzip</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>hdf</extension>
-        <mime-type>application/x-hdf</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>hqx</extension>
-        <mime-type>application/mac-binhex40</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>htc</extension>
-        <mime-type>text/x-component</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>htm</extension>
-        <mime-type>text/html</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>html</extension>
-        <mime-type>text/html</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>hqx</extension>
-        <mime-type>application/mac-binhex40</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>ief</extension>
-        <mime-type>image/ief</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>jad</extension>
-        <mime-type>text/vnd.sun.j2me.app-descriptor</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>jar</extension>
-        <mime-type>application/java-archive</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>java</extension>
-        <mime-type>text/plain</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>jnlp</extension>
-        <mime-type>application/x-java-jnlp-file</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>jpe</extension>
-        <mime-type>image/jpeg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>jpeg</extension>
-        <mime-type>image/jpeg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>jpg</extension>
-        <mime-type>image/jpeg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>js</extension>
-        <mime-type>text/javascript</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>jsf</extension>
-        <mime-type>text/plain</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>jspf</extension>
-        <mime-type>text/plain</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>kar</extension>
-        <mime-type>audio/x-midi</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>latex</extension>
-        <mime-type>application/x-latex</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>m3u</extension>
-        <mime-type>audio/x-mpegurl</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mac</extension>
-        <mime-type>image/x-macpaint</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>man</extension>
-        <mime-type>application/x-troff-man</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>me</extension>
-        <mime-type>application/x-troff-me</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mid</extension>
-        <mime-type>audio/x-midi</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>midi</extension>
-        <mime-type>audio/x-midi</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mif</extension>
-        <mime-type>application/x-mif</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mov</extension>
-        <mime-type>video/quicktime</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>movie</extension>
-        <mime-type>video/x-sgi-movie</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mp1</extension>
-        <mime-type>audio/x-mpeg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mp2</extension>
-        <mime-type>audio/x-mpeg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mp3</extension>
-        <mime-type>audio/x-mpeg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mpa</extension>
-        <mime-type>audio/x-mpeg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mpe</extension>
-        <mime-type>video/mpeg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mpeg</extension>
-        <mime-type>video/mpeg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mpega</extension>
-        <mime-type>audio/x-mpeg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mpg</extension>
-        <mime-type>video/mpeg</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>mpv2</extension>
-        <mime-type>video/mpeg2</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>ms</extension>
-        <mime-type>application/x-wais-source</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>nc</extension>
-        <mime-type>application/x-netcdf</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>oda</extension>
-        <mime-type>application/oda</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>pbm</extension>
-        <mime-type>image/x-portable-bitmap</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>pct</extension>
-        <mime-type>image/pict</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>pdf</extension>
-        <mime-type>application/pdf</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>pgm</extension>
-        <mime-type>image/x-portable-graymap</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>pic</extension>
-        <mime-type>image/pict</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>pict</extension>
-        <mime-type>image/pict</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>pls</extension>
-        <mime-type>audio/x-scpls</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>png</extension>
-        <mime-type>image/png</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>pnm</extension>
-        <mime-type>image/x-portable-anymap</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>pnt</extension>
-        <mime-type>image/x-macpaint</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>ppm</extension>
-        <mime-type>image/x-portable-pixmap</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>ppt</extension>
-        <mime-type>application/powerpoint</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>ps</extension>
-        <mime-type>application/postscript</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>psd</extension>
-        <mime-type>image/x-photoshop</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>qt</extension>
-        <mime-type>video/quicktime</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>qti</extension>
-        <mime-type>image/x-quicktime</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>qtif</extension>
-        <mime-type>image/x-quicktime</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>ras</extension>
-        <mime-type>image/x-cmu-raster</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>rgb</extension>
-        <mime-type>image/x-rgb</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>rm</extension>
-        <mime-type>application/vnd.rn-realmedia</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>roff</extension>
-        <mime-type>application/x-troff</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>rtf</extension>
-        <mime-type>application/rtf</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>rtx</extension>
-        <mime-type>text/richtext</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>sh</extension>
-        <mime-type>application/x-sh</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>shar</extension>
-        <mime-type>application/x-shar</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>smf</extension>
-        <mime-type>audio/x-midi</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>sit</extension>
-        <mime-type>application/x-stuffit</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>snd</extension>
-        <mime-type>audio/basic</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>src</extension>
-        <mime-type>application/x-wais-source</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>sv4cpio</extension>
-        <mime-type>application/x-sv4cpio</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>sv4crc</extension>
-        <mime-type>application/x-sv4crc</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>swf</extension>
-        <mime-type>application/x-shockwave-flash</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>t</extension>
-        <mime-type>application/x-troff</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>tar</extension>
-        <mime-type>application/x-tar</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>tcl</extension>
-        <mime-type>application/x-tcl</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>tex</extension>
-        <mime-type>application/x-tex</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>texi</extension>
-        <mime-type>application/x-texinfo</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>texinfo</extension>
-        <mime-type>application/x-texinfo</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>tif</extension>
-        <mime-type>image/tiff</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>tiff</extension>
-        <mime-type>image/tiff</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>tr</extension>
-        <mime-type>application/x-troff</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>tsv</extension>
-        <mime-type>text/tab-separated-values</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>txt</extension>
-        <mime-type>text/plain</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>ulw</extension>
-        <mime-type>audio/basic</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>ustar</extension>
-        <mime-type>application/x-ustar</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>xbm</extension>
-        <mime-type>image/x-xbitmap</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>xht</extension>
-        <mime-type>application/xhtml+xml</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>xhtml</extension>
-        <mime-type>application/xhtml+xml</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>xml</extension>
-        <mime-type>text/xml</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>xpm</extension>
-        <mime-type>image/x-xpixmap</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>xsl</extension>
-        <mime-type>text/xml</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>xwd</extension>
-        <mime-type>image/x-xwindowdump</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>wav</extension>
-        <mime-type>audio/x-wav</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>svg</extension>
-        <mime-type>image/svg+xml</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>svgz</extension>
-        <mime-type>image/svg+xml</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>vsd</extension>
-        <mime-type>application/x-visio</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <!-- Wireless Bitmap -->
-        <extension>wbmp</extension>
-        <mime-type>image/vnd.wap.wbmp</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <!-- WML Source -->
-        <extension>wml</extension>
-        <mime-type>text/vnd.wap.wml</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <!-- Compiled WML -->
-        <extension>wmlc</extension>
-        <mime-type>application/vnd.wap.wmlc</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <!-- WML Script Source -->
-        <extension>wmls</extension>
-        <mime-type>text/vnd.wap.wmlscript</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <!-- Compiled WML Script -->
-        <extension>wmlscriptc</extension>
-        <mime-type>application/vnd.wap.wmlscriptc</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>wrl</extension>
-        <mime-type>x-world/x-vrml</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>Z</extension>
-        <mime-type>application/x-compress</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>z</extension>
-        <mime-type>application/x-compress</mime-type>
-    </mime-mapping>
-    <mime-mapping>
-        <extension>zip</extension>
-        <mime-type>application/zip</mime-type>
-    </mime-mapping>
-
-
-  <!-- ==================== Default Welcome File List ===================== -->
-  <!-- When a request URI refers to a directory, the default servlet looks  -->
-  <!-- for a "welcome file" within that directory and, if present,          -->
-  <!-- to the corresponding resource URI for display.  If no welcome file   -->
-  <!-- is present, the default servlet either serves a directory listing,   -->
-  <!-- or returns a 404 status, depending on how it is configured.          -->
-  <!--                                                                      -->
-  <!-- If you define welcome files in your own application's web.xml        -->
-  <!-- deployment descriptor, that list *replaces* the list configured      -->
-  <!-- here, so be sure that you include any of the default values that     -->
-  <!-- you wish to include.                                                 -->
-
-    <welcome-file-list>
-        <welcome-file>index.html</welcome-file>
-        <welcome-file>index.htm</welcome-file>
-        <welcome-file>index.jsp</welcome-file>
-    </welcome-file-list>
-
-</web-app>
diff --git a/hdfs/src/contrib/hdfsproxy/src/test/resources/tomcat-web.xml b/hdfs/src/contrib/hdfsproxy/src/test/resources/tomcat-web.xml
deleted file mode 100644
index b264c8a..0000000
--- a/hdfs/src/contrib/hdfsproxy/src/test/resources/tomcat-web.xml
+++ /dev/null
@@ -1,154 +0,0 @@
-<?xml version="1.0" encoding="ISO-8859-1"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!DOCTYPE web-app 
-    PUBLIC "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN" 
-    "http://java.sun.com/dtd/web-app_2_3.dtd">
-
-<web-app>
-
-
-    <!-- General description of your web application -->
-
-    <display-name>HDFS Proxy</display-name>
-    <description>
-      get data from grid 
-    </description>
-
-
-    <!-- Context initialization parameters that define shared
-         String constants used within your application, which
-         can be customized by the system administrator who is
-         installing your application.  The values actually
-         assigned to these parameters can be retrieved in a
-         servlet or JSP page by calling:
-
-             String value =
-               getServletContext().getInitParameter("name");
-
-         where "name" matches the <param-name> element of
-         one of these initialization parameters.
-
-         You can define any number of context initialization
-         parameters, including zero.
-    -->
-
-    <context-param>
-      <param-name>webmaster</param-name>
-      <param-value>zhiyong1@yahoo-inc.com</param-value>
-      <description>
-        The EMAIL address of the administrator to whom questions
-        and comments about this application should be addressed.
-      </description>
-    </context-param>
-    
-     	
-
-
-    <!-- Servlet definitions for the servlets that make up
-         your web application, including initialization
-         parameters.  With Tomcat, you can also send requests
-         to servlets not listed here with a request like this:
-
-           http://localhost:8080/{context-path}/servlet/{classname}
-
-         but this usage is not guaranteed to be portable.  It also
-         makes relative references to images and other resources
-         required by your servlet more complicated, so defining
-         all of your servlets (and defining a mapping to them with
-         a servlet-mapping element) is recommended.
-
-         Servlet initialization parameters can be retrieved in a
-         servlet or JSP page by calling:
-
-             String value =
-               getServletConfig().getInitParameter("name");
-
-         where "name" matches the <param-name> element of
-         one of these initialization parameters.
-
-         You can define any number of servlets, including zero.
-    -->
-
-		 <filter>
-	        <filter-name>proxyFilter</filter-name>
-	        <filter-class>org.apache.hadoop.hdfsproxy.ProxyFilter</filter-class>
-	   </filter>
-
-    <filter-mapping>
-        <filter-name>proxyFilter</filter-name>
-        <url-pattern>/*</url-pattern>
-    </filter-mapping>
-    
-    <servlet>
-    	<servlet-name>listPaths</servlet-name>
-      <description>list paths data access</description>
-      <servlet-class>org.apache.hadoop.hdfsproxy.ProxyListPathsServlet</servlet-class>
-    </servlet>
-    
-    <servlet-mapping>
-        <servlet-name>listPaths</servlet-name>
-        <url-pattern>/listPaths/*</url-pattern>
-    </servlet-mapping>
-
-		<servlet>
-    	<servlet-name>data</servlet-name>
-      <description>data access</description>
-      <servlet-class>org.apache.hadoop.hdfsproxy.ProxyFileDataServlet</servlet-class>
-    </servlet>
-    
-	  <servlet-mapping>
-        <servlet-name>data</servlet-name>
-        <url-pattern>/data/*</url-pattern>
-    </servlet-mapping>
-    
-    <servlet>
-    	<servlet-name>streamFile</servlet-name>
-      <description>stream file access</description>
-      <servlet-class>org.apache.hadoop.hdfsproxy.ProxyStreamFile</servlet-class>
-    </servlet>
-    
-    <servlet-mapping>
-        <servlet-name>streamFile</servlet-name>
-        <url-pattern>/streamFile/*</url-pattern>
-    </servlet-mapping>
-    
-
-		<welcome-file-list>
-		  <welcome-file>index.html</welcome-file>
-		</welcome-file-list>
-
-    <!-- Define the default session timeout for your application,
-         in minutes.  From a servlet or JSP page, you can modify
-         the timeout for a particular session dynamically by using
-         HttpSession.getMaxInactiveInterval(). -->
-
-    <session-config>
-      <session-timeout>30</session-timeout>    <!-- 30 minutes -->
-    </session-config>    
-
-
-</web-app>
-
-
-
-
-
-
-
-
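The deployment descriptor removed above wired a single access-control filter (proxyFilter) in front of three servlets (listPaths, data, streamFile). For context only, the following is a minimal, hypothetical sketch of what such a servlet filter looks like in Java; the class and method names are illustrative and are not the removed hdfsproxy sources.

    // Illustrative sketch only (not the removed hdfsproxy code): a minimal
    // javax.servlet.Filter in the spirit of the <filter> entry deleted above.
    import java.io.IOException;
    import javax.servlet.Filter;
    import javax.servlet.FilterChain;
    import javax.servlet.FilterConfig;
    import javax.servlet.ServletException;
    import javax.servlet.ServletRequest;
    import javax.servlet.ServletResponse;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    public class ExampleAccessFilter implements Filter {
      public void init(FilterConfig config) throws ServletException {
        // A real filter would load its access policy here (certificates, LDAP, ...).
      }

      public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain)
          throws IOException, ServletException {
        HttpServletRequest httpReq = (HttpServletRequest) req;
        if (!isAllowed(httpReq)) {
          // Reject requests that fail the access check before any servlet runs.
          ((HttpServletResponse) resp).sendError(HttpServletResponse.SC_FORBIDDEN);
          return;
        }
        // Otherwise hand off to the mapped servlet (/listPaths, /data, /streamFile, ...).
        chain.doFilter(req, resp);
      }

      private boolean isAllowed(HttpServletRequest req) {
        // Placeholder policy check; the real hdfsproxy filters consulted SSL certs or LDAP.
        return req.getRequestURI() != null;
      }

      public void destroy() {
      }
    }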
diff --git a/hdfs/src/docs/src/documentation/content/xdocs/hdfsproxy.xml b/hdfs/src/docs/src/documentation/content/xdocs/hdfsproxy.xml
deleted file mode 100644
index 7d52880..0000000
--- a/hdfs/src/docs/src/documentation/content/xdocs/hdfsproxy.xml
+++ /dev/null
@@ -1,601 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN" "http://forrest.apache.org/dtd/document-v20.dtd">
-
-
-<document>
-
-  <header>
-    <title> HDFS Proxy Guide</title>
-  </header>
-
-  <body>
-    <section>
-      <title> Introduction </title>
-      <p> HDFS Proxy is a proxy server through which a hadoop client (through HSFTP) or a standard
-        HTTPS client (wget, curl, etc) can talk to a hadoop server and, more importantly, pull data from
-        the server. It puts an access control layer in front of the hadoop namenode server and
-        extends its functionality to allow hadoop cross-version data transfer. </p>
-    </section>
-
-    <section>
-      <title> Goals and Use Cases </title>
-      <section>
-        <title> Data Transfer from HDFS clusters </title>
-        <ul>
-          <li>User uses HSFTP protocol (hadoop distcp/fs, etc) to access HDFS proxy to copy out data stored on one or more HDFS clusters.</li>
-          <li>User uses HTTPS protocol (curl, wget, etc) to access HDFS proxy to copy out data stored on one or more HDFS clusters </li>
-        </ul>
-      </section>
-      
-      <section>
-        <title> Cross-version Data Transfer </title>
-        <p>There may be multiple HDFS clusters, possibly running different hadoop versions, each holding
-          different data. A client needs to access these data in a standard way without worrying about
-          version compatibility issues. </p>
-      </section>
-      
-      <section>
-        <title> User Access Control </title>
-        <ul>
-          <li>User Access Control through SSL certificates</li>
-          <li>User Access Control through LDAP (Lightweight Directory Access Protocol) server</li>
-        </ul>
-      </section>
-      
-    </section>
-    
-    <section>
-      <title> Comparison with NameNode's H(S)FTP Interface </title>
-      <p>NameNode starts an HTTP listener at <code>dfs.namenode.http-address</code> (default port 50070) that provides an HFTP interface for clients. It can also start an HTTPS listener at <code>dfs.namenode.https-address</code> if <code>dfs.https.enable</code> is set to true (by default, <code>dfs.https.enable</code> is not defined), which provides an HSFTP interface for clients.</p>
-      <section>
-        <title>Advantages of Proxy Over NameNode HTTP(S) server</title>
-        <ol>
-          <li>Centralizing the access control layer in the proxy lowers the burden on the NameNode server. In this sense, HDFS proxy plays a filtering role that controls data access to the NameNode and DataNodes. This is especially useful if the HDFS system holds sensitive data.
-          </li>
-          <li> By modularizing HDFS proxy into a standalone package, we decouple it from the complexity of the HDFS system and can expand the proxy's functionality without worrying about affecting other HDFS features.
-          </li>
-        </ol>
-      </section>
-      <section>
-        <title>Disadvantages of Using the Proxy Instead of Getting Data Directly from the H(S)FTP Interface: Slower Speed. This is due to:</title>
-        <ol>
-          <li>HDFS proxy needs to first copy data from the source cluster, then transfer the data out to the client.</li>
-          <li> Unlike the H(S)FTP interface, where file status listings, etc., go through the NameNode server and real data transfer is redirected to the DataNode servers, all data transfer under HDFS proxy goes through the proxy server.</li>
-        </ol>        
-      </section>
-
-    </section>
-    
-    <section>
-      <title> Design </title>
-      <section>
-        <title> Design Overview </title>
-        <figure src="images/hdfsproxy-overview.jpg" alt="HDFS Proxy Architecture"/>
-        <p>As shown in the above figure, on the client side the proxy server will accept requests from HSFTP clients and HTTPS clients. The requests will pass through a filter module (containing one or more filters) for access control checking. Then the requests will go through a delegation module, whose responsibility is to direct the requests to the right client version for accessing the source cluster. After that, the delegated client will talk to the source cluster server through the RPC protocol using servlets. </p>
-      </section>
-  
-      <section>
-        <title> Filter Module: Proxy Authentication and Access Control </title>
-        <figure src="images/hdfsproxy-server.jpg" alt="HDFS Proxy Filters"/>
-        
-        <p> To realize proxy authentication and access control, we used a servlet filter. The filter module is very
-          flexible: it can be installed or disabled by simply changing the corresponding items in the deployment
-          descriptor file (web.xml). We implemented two filters in the proxy code: ProxyFilter and LdapIpDirFilter. How each filter works is described below.</p>
-               
-        <section>
-          <title>SSL certificate-based proxyFilter</title>
-          <ol>
-            <li>A user will use a pre-issued SSL certificate to access the proxy.</li>
-            <li>The proxy server will authenticate the user certificate.</li>
-            <li>The user’s authenticated identity (extracted from the user’s SSL certificate) is used to check access to data on the proxy.</li>
-            <li>User access information is stored in two configuration files, user-certs.xml and user-permissions.xml.</li>
-            <li>The proxy will forward the user’s authenticated identity to HDFS clusters for HDFS file permission checking</li>
-          </ol>
-        </section>
-        
-        <section>
-          <title>LDAP-based LdapIpDirFilter</title>
-          <ol>
-            <li>A standalone LDAP server needs to be set up to store user information as entries, where each entry contains userId, user group, IP address(es), allowable HDFS directories, etc.</li>
-            <li>An LDAP entry may contain multiple IP addresses with the same userId and group attribute to realize headless accounts.</li>
-            <li>Upon receiving a request, the proxy server will extract the user's IP address from the request header, query the LDAP server with that IP address to get the directory permission information, then compare that with the requested path to make an allow/deny decision.</li>
-          </ol>
-        </section>
-        <p>The SSL-based proxyFilter provides strong PKI authentication and encryption; the proxy server can create a self-signed CA using OpenSSL and use that CA to sign and issue certificates to clients. </p>
-        <p>Managing access information through configuration files is a convenient way to start and is easy to set up for a small user group. However, to scale to a large user group and to handle account management operations such as adding, deleting, and changing access, a separate package or a different mechanism such as an LDAP server is needed.</p>
-        <p>The schema for the entry attributes in the LDAP server should match what is used in the proxy. The schema currently used in the proxy is configurable through hdfsproxy-default.xml, but the attributes should always contain the IP address (default uniqueMember), userId (default uid), user group (default userClass), and allowable HDFS directories (default documentLocation).</p>
-        <p>Users can also write their own filters to plug in the filter chain to realize extended functionalities.</p>
-      </section>
-      
-      <section>
-        <title> Delegation Module: HDFS Cross-version Data Transfer </title>
-        <figure src="images/hdfsproxy-forward.jpg" alt="HDFS Proxy Forwarding"/> 
-        <p>As shown in the Figure, the delegation module contains two parts: </p>
-        <ol>
-          <li>A Forwarding war, which plays the role of identifying the requests and directing the requests to the right HDFS client RPC version. </li>
-          <li>Several RPC client versions necessary to talk to all the HDFS source cluster servers. </li>
-        </ol>
-        <p>All servlets are packaged in the WAR files.</p>
-        <p>Strictly speaking, HDFS Proxy does not by itself solve the HDFS cross-version communication problem. However, by wrapping all the RPC client versions and delegating client requests to the right version of the RPC client, HDFS Proxy functions as if it can talk to multiple source clusters running different Hadoop versions.</p>
-        <p>Packaging the servlets in the WAR files has several advantages:</p>
-        <ol>
-          <li>It avoids the complexity of writing our own ClassLoaders for the different RPC clients. The servlet
-          container (Tomcat) already uses a separate ClassLoader for each WAR file.</li>
-          <li>Packaging is done by the servlet container (Tomcat). The servlets in each client WAR file
-          only need to deal with their own version of the source HDFS clusters.</li>
-        </ol>
-        <p>Note that communication between servlets in the forwarding WAR and those in a specific client-version WAR can only use built-in data types such as int and String, since such types are loaded through the common classloader. </p>
-      </section>
-      
-      <section>
-        <title> Servlets: Where Data Transfer Occurs</title>
-        <p>Proxy server functionality is implemented using servlets deployed under a servlet container. Specifically, there are three proxy servlets: <code>ProxyListPathsServlet</code>, <code>ProxyFileDataServlet</code>, and <code>ProxyStreamFile</code>. Together, they implement the same H(S)FTP interface as the original <code>ListPathsServlet</code>, <code>FileDataServlet</code>, and <code>StreamFile</code> servlets do on an HDFS cluster. In fact, the proxy servlets are subclasses of the original servlets with minor changes, such as retrieving the client UGI from the proxy server. All three servlets are packaged in the client WAR files.</p>
-        <p>The forwarding proxy, implemented by <code>ProxyForwardServlet</code>, is packaged in a separate web application (ROOT.war). All client requests should be sent to the forwarding proxy. The forwarding proxy does not implement any functionality by itself; it simply forwards client requests to the right web application with the right servlet path.</p>
-        <p>The forwarding servlets hand requests to servlets in the target web applications through servlet cross-context communication, which is enabled by setting <code>crossContext="true"</code> in the servlet container's configuration file, as sketched below.</p>
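-        <p>For illustration only, a minimal Java sketch of the standard servlet cross-context mechanism this relies on; it is not the actual <code>ProxyForwardServlet</code> code, and the servlet name, context path "/proxy1", and servlet path "/listPaths" are hypothetical:</p>
-        <source>
-import java.io.IOException;
-import javax.servlet.RequestDispatcher;
-import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-
-public class ForwardSketchServlet extends HttpServlet {
-  @Override
-  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
-      throws ServletException, IOException {
-    // getContext() only returns a foreign web application's context when the
-    // container allows cross-context access (crossContext="true" in Tomcat).
-    ServletContext target = getServletContext().getContext("/proxy1");
-    if (target == null) {
-      resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
-          "cross-context access is not enabled");
-      return;
-    }
-    // Hand the request over to a servlet path inside the target application.
-    RequestDispatcher dispatcher = target.getRequestDispatcher("/listPaths");
-    dispatcher.forward(req, resp);
-  }
-}
-        </source>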
-        <p>The proxy server also installs a servlet, <code>ProxyFileForward</code>, a subclass of <code>ProxyForwardServlet</code>, on the path /file. It exposes a simple HTTPS GET interface (internally delegating the work to the <code>ProxyStreamFile</code> servlet via the forwarding mechanism discussed above). This interface supports standard HTTP clients such as curl and wget. HTTPS client requests on the wire look like <code>https://proxy_address/file/file_path</code>.</p>
-      </section>
-      
-      <section>
-        <title> Load Balancing and Identifying Requests through Domain Names </title>
-        <figure src="images/request-identify.jpg" alt="Request Identification"/> 
-        <p>The delegation module relies on the forwarding WAR to identify each request so that it can direct the request to the right HDFS client RPC version. Identifying requests through the domain name, which can be extracted from the request header, is a straightforward approach. Note that a domain name can have many aliases through CNAME records. By exploiting this feature, we can create a domain name, create many aliases of it, and map these aliases to the different client RPC versions. At the same time, we may need many servers for load balancing. We can make all of these servers (with different IP addresses) resolve from the same domain name in a round-robin fashion. This provides basic load balancing when multiple proxy servers are running in the back end; a sketch of such a DNS setup follows.</p>
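-        <p>For illustration only, a hypothetical DNS zone fragment; the names, addresses, and number of aliases are made up, but the pattern of per-version CNAME aliases over a round-robin set of A records matches the description above:</p>
-        <source>
-; round-robin pool of proxy servers
-proxy        IN A     10.10.2.11
-proxy        IN A     10.10.2.12
-; one alias per source-cluster client version
-proxy1       IN CNAME proxy
-proxy2       IN CNAME proxy
-        </source>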
-      </section>
-    
-    </section>
-    
-    <section>
-      <title> Jetty-based Installation and Configuration </title>
-      <p>With the Jetty-based installation, only a subset of the proxy features is supported.</p>
-      <section>
-        <title> Supported Features </title>
-        <ul>
-          <li>Single Hadoop source cluster data transfer</li>
-          <li>Single Hadoop version data transfer</li>
-          <li>Authenticate users via user SSL certificates with <code>ProxyFilter</code> installed</li>
-          <li>Enforce access control based on configuration files.</li>
-        </ul>
-      </section>
-      
-      <section>
-        <title> Configuration Files </title>
-        <ol>
-          <li>
-            <strong>hdfsproxy-default.xml</strong>
-            <table>
-              <tr>
-                <th>Name</th>
-                <th>Description</th>
-              </tr>
-              <tr>
-                <td>hdfsproxy.https.address</td>
-                <td>the SSL port that hdfsproxy listens on. </td>
-              </tr>
-              <tr>
-                <td>hdfsproxy.hosts</td>
-                <td>location of hdfsproxy-hosts file. </td>
-              </tr>
-              <tr>
-                <td>hdfsproxy.dfs.namenode.address</td>
-                <td>namenode address of the HDFS cluster being proxied. </td>
-              </tr>
-              <tr>
-                <td>hdfsproxy.https.server.keystore.resource</td>
-                <td>location of the resource from which ssl server keystore information will be extracted. </td>
-              </tr>
-              <tr>
-                <td>hdfsproxy.user.permissions.file.location</td>
-                <td>location of the user permissions file. </td>
-              </tr>
-              <tr>
-                <td>hdfsproxy.user.certs.file.location</td>
-                <td>location of the user certs file. </td>
-              </tr>
-              <tr>
-                <td>hdfsproxy.ugi.cache.ugi.lifetime</td>
-                <td> The lifetime (in minutes) of a cached ugi. </td>
-              </tr>
-            </table>     
-          </li>              
-          <li>     
-            <strong>ssl-server.xml</strong>
-            <table>
-              <tr>
-                <th>Name</th>
-                <th>Description</th>
-              </tr>
-              <tr>
-                <td>ssl.server.truststore.location</td>
-                <td>location of the truststore. </td>
-              </tr>
-              <tr>
-                <td>ssl.server.truststore.password</td>
-                <td>truststore password. </td>
-              </tr>
-              <tr>
-                <td>ssl.server.keystore.location</td>
-                <td>location of the keystore. </td>
-              </tr>
-              <tr>
-                <td>ssl.server.keystore.password</td>
-                <td>keystore password. </td>
-              </tr>
-              <tr>
-                <td>ssl.server.keystore.keypassword</td>
-                <td>key password. </td>
-              </tr>
-            </table>
-          </li>
-          <li>     
-            <strong>user-certs.xml</strong>
-            <table>
-              <tr>
-                <th>Name</th>
-                <th>Description</th>
-              </tr>
-              <tr>
-                <td colspan="2">This file defines the mappings from username to a comma-separated list of certificate serial numbers that the user is allowed to use. One mapping per user. Wildcard characters, such as "*" and "?", are not recognized. Any leading or trailing whitespace is stripped/ignored. In order for a user to issue the "clearUgiCache" and "reloadPermFiles" commands, the certificate serial number the user presents must also belong to the user "Admin" (an illustrative entry is sketched after this list). 
-                </td>
-              </tr>
-            </table>
-          </li>
-          <li>
-            <strong>user-permissions.xml</strong>
-            <table>
-              <tr>
-                <th>Name</th>
-                <th>Description</th>
-              </tr>
-              <tr>
-                <td colspan="2">This file defines the mappings from user name to a comma-separated list of directories/files that the user is allowed to access. One mapping per user. Wildcard characters, such as "*" and "?", are not recognized. For example, to match the "/output" directory, one can use "/output" or "/output/", but not "/output/*". Note that any leading or trailing whitespace is stripped/ignored for the name field. 
-                </td>
-              </tr>
-            </table>
-          </li> 
-        </ol>
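-        <p>As a sketch only, assuming these two files follow the Hadoop configuration property format (check the sample files shipped with hdfsproxy for the authoritative layout), hypothetical entries for a user "jdoe" might look like:</p>
-        <source>
-&lt;!-- user-certs.xml: user name mapped to allowed certificate serial numbers --&gt;
-&lt;property&gt;
-  &lt;name&gt;jdoe&lt;/name&gt;
-  &lt;value&gt;20ab34cd, 21ef56aa&lt;/value&gt;
-&lt;/property&gt;
-
-&lt;!-- user-permissions.xml: user name mapped to accessible directories/files --&gt;
-&lt;property&gt;
-  &lt;name&gt;jdoe&lt;/name&gt;
-  &lt;value&gt;/user/jdoe, /data/shared&lt;/value&gt;
-&lt;/property&gt;
-        </source>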
-      </section>
-      <section>
-        <title> Build Process </title>        
-        <p>Under <code>$HADOOP_PREFIX</code> do the following <br/>
-          <code> $ ant clean tar</code> <br/>
-          <code> $ cd src/contrib/hdfsproxy/</code> <br/>
-          <code> $ ant clean tar</code> <br/>
-          The <code>hdfsproxy-*.tar.gz</code> file will be generated under <code>$HADOOP_PREFIX/build/contrib/hdfsproxy/</code>. Use this tarball for the server start-up/shutdown process after the necessary configuration. 
-        </p>
-      </section>  
-      <section>
-        <title> Server Start up and Shutdown</title>        
-        <p> Starting up a Jetty-based HDFS Proxy server is similar to starting up an HDFS cluster: simply run the <code>hdfsproxy</code> shell command. The main configuration file is <code>hdfsproxy-default.xml</code>, which should be on the classpath. <code>hdfsproxy-env.sh</code> can be used to set up environment variables; in particular, <code>JAVA_HOME</code> should be set. As listed above, additional configuration files include <code>user-certs.xml</code>, <code>user-permissions.xml</code> and <code>ssl-server.xml</code>, which specify the allowed user certs, the allowed directories/files, and the SSL keystore information for the proxy, respectively. The location of these files can be specified in <code>hdfsproxy-default.xml</code>. The environment variable <code>HDFSPROXY_CONF_DIR</code> can be used to point to the directory where these configuration files are located. The configuration files (<code>hadoop-site.xml</code>, or <code>core-site.xml</code> and <code>hdfs-site.xml</code>) of the proxied HDFS cluster should also be available on the classpath.
-        </p>
-        <p> Mirroring those used in HDFS, a few shell scripts are provided to start and stop a group of proxy servers. The hosts to run hdfsproxy on are specified in <code>hdfsproxy-hosts</code> file, one host per line. All hdfsproxy servers are stateless and run independently from each other.  </p>
-        <p>
-          To start a group of proxy servers, do <br/>
-          <code> $ start-hdfsproxy.sh </code> 
-        </p>
-        <p>
-          To stop a group of proxy servers, do <br/>
-          <code> $ stop-hdfsproxy.sh </code> 
-        </p>
-        <p> 
-          To trigger reloading of <code>user-certs.xml</code> and <code>user-permissions.xml</code> files on all proxy servers listed in the <code>hdfsproxy-hosts</code> file, do <br/>       
-        <code> $ hdfsproxy -reloadPermFiles </code> 
-        </p>
-        <p>To clear the UGI caches on all proxy servers, do <br/>
-          <code> $ hdfsproxy -clearUgiCache </code> 
-        </p>
-      </section>     
-      
-      <section>
-        <title> Verification </title>
-        <p> Use HSFTP client <br/>
-          <code>bin/hadoop fs -ls "hsftp://proxy.address:port/"</code>
-        </p>
-      </section>
-
-    </section>      
-    
-    <section>
-        <title> Tomcat-based Installation and Configuration </title>
-        <p>With the Tomcat-based installation, all HDFS Proxy features are supported.</p>
-        <section>
-          <title> Supported Features </title>
-          <ul>
-            <li>Multiple Hadoop source cluster data transfer</li>
-            <li>Multiple Hadoop version data transfer</li>
-            <li>Authenticate users via user SSL certificates with <code>ProxyFilter</code> installed</li>
-            <li>Authentication and authorization via LDAP with <code>LdapIpDirFilter</code> installed</li>
-            <li>Access control based on configuration files if <code>ProxyFilter</code> is installed.</li>
-            <li>Access control based on LDAP entries if <code>LdapIpDirFilter</code> is installed.</li>
-            <li>Standard HTTPS Get Support for file transfer</li>
-          </ul>
-        </section>
-        
-        
-        <section>
-          <title> Source Cluster Related Configuration </title>
-          <ol>
-            <li>
-              <strong>hdfsproxy-default.xml</strong>
-              <table>
-                <tr>
-                  <th>Name</th>
-                  <th>Description</th>
-                </tr>
-                <tr>
-                  <td>fs.defaultFS</td>
-                  <td>Source Cluster NameNode address</td>
-                </tr>
-                <tr>
-                  <td>dfs.blocksize</td>
-                  <td>The block size for file transfers (see the example configuration after this list)</td>
-                </tr>
-                <tr>
-                  <td>io.file.buffer.size</td>
-                  <td> The size of buffer for use in sequence files. The size of this buffer should probably be a multiple of hardware page size (4096 on Intel x86), and it determines how much data is buffered during read and write operations </td>
-                </tr>
-              </table>   
-            </li>
-          </ol>
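-          <p>A minimal sketch of the source-cluster portion of hdfsproxy-default.xml, assuming the standard Hadoop configuration property format; the NameNode address and sizes are placeholder values:</p>
-          <source>
-&lt;property&gt;
-  &lt;name&gt;fs.defaultFS&lt;/name&gt;
-  &lt;value&gt;hdfs://source-nn.example.com:8020&lt;/value&gt;
-&lt;/property&gt;
-&lt;property&gt;
-  &lt;name&gt;dfs.blocksize&lt;/name&gt;
-  &lt;value&gt;134217728&lt;/value&gt;
-&lt;/property&gt;
-&lt;property&gt;
-  &lt;name&gt;io.file.buffer.size&lt;/name&gt;
-  &lt;value&gt;131072&lt;/value&gt;
-&lt;/property&gt;
-          </source>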
-        </section>
-      
-        <section>
-          <title> SSL Related Configuration </title>
-          <ol>
-            <li>
-              <strong>hdfsproxy-default.xml</strong>
-              <table>
-                <tr>
-                  <th>Name</th>
-                  <th>Description</th>
-                </tr>
-                <tr>
-                  <td>hdfsproxy.user.permissions.file.location</td>
-                  <td>location of the user permissions file. </td>
-                </tr>
-                <tr>
-                  <td>hdfsproxy.user.certs.file.location</td>
-                  <td>location of the user certs file. </td>
-                </tr>
-                <tr>
-                  <td>hdfsproxy.ugi.cache.ugi.lifetime</td>
-                  <td> The lifetime (in minutes) of a cached ugi. </td>
-                </tr>
-              </table>     
-            </li>              
-            <li>     
-              <strong>user-certs.xml</strong>
-              <table>
-                <tr>
-                  <th>Name</th>
-                  <th>Description</th>
-                </tr>
-                <tr>
-                  <td colspan="2">This file defines the mappings from username to a comma-separated list of certificate serial numbers that the user is allowed to use. One mapping per user. Wildcard characters, such as "*" and "?", are not recognized. Any leading or trailing whitespace is stripped/ignored. In order for a user to issue the "clearUgiCache" and "reloadPermFiles" commands, the certificate serial number the user presents must also belong to the user "Admin". 
-                  </td>
-                </tr>
-              </table>
-            </li>
-            <li>
-              <strong>user-permissions.xml</strong>
-              <table>
-                <tr>
-                  <th>Name</th>
-                  <th>Description</th>
-                </tr>
-                <tr>
-                  <td colspan="2">This file defines the mappings from user name to a comma-separated list of directories/files that the user is allowed to access. One mapping per user. Wildcard characters, such as "*" and "?", are not recognized. For example, to match the "/output" directory, one can use "/output" or "/output/", but not "/output/*". Note that any leading or trailing whitespace is stripped/ignored for the name field. 
-                  </td>
-                </tr>
-              </table>
-            </li> 
-          </ol>
-        </section>
-        
-        <section>
-          <title> LDAP Related Configuration </title>
-          <ol>
-            <li>
-              <strong>hdfsproxy-default.xml</strong>
-              <table>
-                <tr>
-                  <th>Name</th>
-                  <th>Description</th>
-                </tr>
-                <tr>
-                  <td>hdfsproxy.ldap.initial.context.factory</td>
-                  <td>LDAP context factory. </td>
-                </tr>
-                <tr>
-                  <td>hdfsproxy.ldap.provider.url</td>
-                  <td>LDAP server address. </td>
-                </tr>
-                <tr>
-                  <td>hdfsproxy.ldap.role.base</td>
-                  <td>LDAP role base. </td>
-                </tr>
-              </table>     
-            </li>              
-          </ol>
-        </section>
-        
-        
-        <section>
-          <title> Tomcat Server Related Configuration </title>
-          <ol>
-            <li>
-              <strong>tomcat-forward-web.xml</strong>
-              <table>
-                <tr>
-                  <th>Name</th>
-                  <th>Description</th>
-                </tr>
-                <tr>
-                  <td colspan="2">This deployment descriptor file defines how servlets and filters are installed in the forwarding WAR (ROOT.war). The default filter installed is <code>LdapIpDirFilter</code>; you can change it to <code>ProxyFilter</code> by using <code>org.apache.hadoop.hdfsproxy.ProxyFilter</code> as your <code>filter-class</code>. </td>
-                </tr>
-              </table>     
-            </li>
-            <li>
-              <strong>tomcat-web.xml</strong>
-              <table>                
-                <tr>
-                  <th>Name</th>
-                  <th>Description</th>
-                </tr>
-                <tr>
-                  <td colspan="2">This deployment descriptor file defines how servlets and filters are installed in the client WAR. The default filter installed is <code>LdapIpDirFilter</code>; you can change it to <code>ProxyFilter</code> by using <code>org.apache.hadoop.hdfsproxy.ProxyFilter</code> as your <code>filter-class</code>. </td>
-                </tr>
-              </table>     
-            </li>
-            <li>
-              <strong>$TOMCAT_HOME/conf/server.xml</strong>
-              <table>                
-                <tr>
-                  <th>Name</th>
-                  <th>Description</th>
-                </tr>
-                <tr>
-                  <td colspan="2"> You need to change Tomcat's server.xml file under $TOMCAT_HOME/conf as detailed in the <a href="http://tomcat.apache.org/tomcat-6.0-doc/ssl-howto.html">Tomcat 6 SSL how-to</a>. Set <code>clientAuth="true"</code> if you need to authenticate clients (see the sketch after this list). 
-                  </td>
-                </tr>
-              </table>     
-            </li>
-            <li>
-              <strong>$TOMCAT_HOME/conf/context.xml</strong>
-              <table>                
-                <tr>
-                  <th>Name</th>
-                  <th>Description</th>
-                </tr>
-                <tr>
-                  <td colspan="2"> You need to change Tomcat's context.xml file under $TOMCAT_HOME/conf by adding <code>crossContext="true"</code> to the <code>Context</code> element (see the sketch after this list).
-                  </td>
-                </tr>
-              </table>     
-            </li>
-          </ol>
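-          <p>As a sketch only, the relevant fragments of the two Tomcat configuration files; the port, keystore, and truststore values are placeholders, and the full set of Connector attributes is described in the Tomcat 6 SSL how-to referenced above:</p>
-          <source>
-&lt;!-- $TOMCAT_HOME/conf/server.xml: HTTPS connector with client authentication --&gt;
-&lt;Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
-           scheme="https" secure="true" sslProtocol="TLS"
-           clientAuth="true"
-           keystoreFile="/etc/hdfsproxy/proxy-keystore.jks" keystorePass="changeit"
-           truststoreFile="/etc/hdfsproxy/proxy-truststore.jks" truststorePass="changeit"/&gt;
-
-&lt;!-- $TOMCAT_HOME/conf/context.xml: enable cross-context forwarding --&gt;
-&lt;Context crossContext="true"&gt;
-  &lt;WatchedResource&gt;WEB-INF/web.xml&lt;/WatchedResource&gt;
-&lt;/Context&gt;
-          </source>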
-        </section>
-        <section>
-          <title> Build and Deployment Process </title>  
-          <section>
-            <title> Build forwarding war (ROOT.war) </title>
-            <p>Suppose hdfsproxy-default.xml has been properly configured and is located in the ${user.home}/proxy-root-conf directory. Under <code>$HADOOP_PREFIX</code> do the following <br/>
-              <code> $ export HDFSPROXY_CONF_DIR=${user.home}/proxy-root-conf</code> <br/>
-              <code> $ ant clean tar</code> <br/>
-              <code> $ cd src/contrib/hdfsproxy/</code> <br/>
-              <code> $ ant clean forward</code> <br/>
-              The <code>hdfsproxy-forward-*.war</code> file will be generated under <code>$HADOOP_PREFIX/build/contrib/hdfsproxy/</code>. Copy this WAR file to Tomcat's webapps directory and rename it to ROOT.war (if the ROOT directory already exists, remove it first) for deployment. 
-            </p>
-          </section>
-          <section>
-            <title> Build cluster client war (client.war) </title>
-            <p>Suppose hdfsproxy-default.xml has been properly configured and is located in the ${user.home}/proxy-client-conf directory. Under <code>$HADOOP_PREFIX</code> do the following <br/>
-              <code> $ export HDFSPROXY_CONF_DIR=${user.home}/proxy-client-conf</code> <br/>
-              <code> $ ant clean tar</code> <br/>
-              <code> $ cd src/contrib/hdfsproxy/</code> <br/>
-              <code> $ ant clean war</code> <br/>
-              The <code>hdfsproxy-*.war</code> file will be generated under <code>$HADOOP_PREFIX/build/contrib/hdfsproxy/</code>. Copy this WAR file to Tomcat's webapps directory and rename it appropriately for deployment. 
-            </p>
-          </section>
-          <section>
-            <title> Handle Multiple Source Clusters </title>
-            <p> To proxy for multiple source clusters, you need to do the following:</p>
-            <ol>
-              <li>Build multiple client WARs with different names and different hdfsproxy-default.xml configurations.</li>
-              <li>Create multiple aliases of the same domain name using CNAME records.</li>
-              <li>Make sure the first part of each alias matches the corresponding client WAR file name. For example, if you have two source clusters, sc1 and sc2, and you created two aliases of the same domain name, proxy1.apache.org and proxy2.apache.org, then you need to name the client WAR files proxy1.war and proxy2.war respectively for your deployment.</li>
-            </ol>
-          </section>
-        </section>  
-        
-        <section>
-          <title> Server Start up and Shutdown</title>        
-          <p> Starting up and shutting down a Tomcat-based HDFS Proxy server is simply a matter of starting up and shutting down the Tomcat server with Tomcat's bin/startup.sh and bin/shutdown.sh scripts.</p>
-          <p> If you need to authenticate client certs, either set <code>truststoreFile</code> and <code>truststorePass</code> following the <a href="http://tomcat.apache.org/tomcat-6.0-doc/ssl-howto.html">Tomcat 6 SSL how-to</a> during the configuration stage, or provide the truststore location by doing the following <br/>
-            <code>export JAVA_OPTS="-Djavax.net.ssl.trustStore=${user.home}/truststore-location -Djavax.net.ssl.trustStorePassword=trustpass"</code> <br/>
-            before you start up Tomcat.
-          </p>
-        </section>     
-        <section>
-          <title> Verification </title>
-          <p>HTTPS client <br/>
-            <code>curl -k "https://proxy.address:port/file/file-path"</code> <br/>
-            <code>wget --no-check-certificate "https://proxy.address:port/file/file-path"</code>
-          </p>
-          <p>HADOOP client <br/>
-            <code>bin/hadoop fs -ls "hsftp://proxy.address:port/"</code>
-          </p>
-        </section>
-        
-    </section>    
-    
-    <section>
-      <title> Hadoop Client Configuration </title>
-      <ul>
-        <li>
-          <strong>ssl-client.xml</strong>
-          <table>            
-            <tr>
-              <th>Name</th>
-              <th>Description</th>
-            </tr>
-            <tr>
-              <td>ssl.client.do.not.authenticate.server</td>
-              <td>if true, trust all server certificates, like curl's -k option</td>
-            </tr>
-            <tr>
-              <td>ssl.client.truststore.location</td>
-              <td>Location of truststore</td>
-            </tr>
-            <tr>
-              <td>ssl.client.truststore.password</td>
-              <td> truststore password </td>
-            </tr>
-            <tr>
-              <td>ssl.client.truststore.type</td>
-              <td> truststore type </td>
-            </tr>
-            <tr>
-              <td>ssl.client.keystore.location</td>
-              <td> Location of keystore </td>
-            </tr>
-            <tr>
-              <td>ssl.client.keystore.password</td>
-              <td> keystore password </td>
-            </tr>
-            <tr>
-              <td>ssl.client.keystore.type</td>
-              <td> keystore type </td>
-            </tr>
-            <tr>
-              <td>ssl.client.keystore.keypassword</td>
-              <td> keystore key password </td>
-            </tr>
-            <tr>
-              <td>ssl.expiration.warn.days</td>
-              <td> server certificate expiration warning threshold in days; 0 means no warning is issued </td>
-            </tr>
-          </table>   
-        </li>
-      </ul>
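-      <p>A minimal sketch of ssl-client.xml using the keys above, in the standard Hadoop configuration format; the paths and passwords are placeholders:</p>
-      <source>
-&lt;configuration&gt;
-  &lt;property&gt;
-    &lt;name&gt;ssl.client.truststore.location&lt;/name&gt;
-    &lt;value&gt;/home/alice/.hdfsproxy/client-truststore.jks&lt;/value&gt;
-  &lt;/property&gt;
-  &lt;property&gt;
-    &lt;name&gt;ssl.client.truststore.password&lt;/name&gt;
-    &lt;value&gt;changeit&lt;/value&gt;
-  &lt;/property&gt;
-  &lt;property&gt;
-    &lt;name&gt;ssl.client.keystore.location&lt;/name&gt;
-    &lt;value&gt;/home/alice/.hdfsproxy/client-keystore.jks&lt;/value&gt;
-  &lt;/property&gt;
-  &lt;property&gt;
-    &lt;name&gt;ssl.client.keystore.password&lt;/name&gt;
-    &lt;value&gt;changeit&lt;/value&gt;
-  &lt;/property&gt;
-&lt;/configuration&gt;
-      </source>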
-    </section>
-
-
-
-  </body>
-</document>
diff --git a/hdfs/src/docs/src/documentation/content/xdocs/site.xml b/hdfs/src/docs/src/documentation/content/xdocs/site.xml
index baf57b4..61c6d43 100644
--- a/hdfs/src/docs/src/documentation/content/xdocs/site.xml
+++ b/hdfs/src/docs/src/documentation/content/xdocs/site.xml
@@ -43,7 +43,6 @@
       <hdfs_SLG        		label="Synthetic Load Generator"  href="SLG_user_guide.html" />
       <hdfs_imageviewer	label="Offline Image Viewer"	href="hdfs_imageviewer.html" />
       <hdfs_editsviewer	label="Offline Edits Viewer"	href="hdfs_editsviewer.html" />
-      <hdfsproxy 			    label="HDFS Proxy" href="hdfsproxy.html"/>
       <hftp 			    label="HFTP" href="hftp.html"/>
       <faultinject_framework label="Fault Injection"  href="faultinject_framework.html" /> 
       <hdfs_libhdfs   		label="C API libhdfs" href="libhdfs.html" /> 
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java b/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java
index 25a67a8..cdd6a4f 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -53,6 +53,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -149,48 +150,38 @@
 
     Conf(Configuration conf) {
       maxBlockAcquireFailures = conf.getInt(
-          DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
-          DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
-      confTime = conf.getInt(
-          DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
+          DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
+          DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
+      confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
           HdfsConstants.WRITE_TIMEOUT);
       ioBufferSize = conf.getInt(
           CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
           CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
-      bytesPerChecksum = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
-          DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
-      socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
+      bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
+          DFS_BYTES_PER_CHECKSUM_DEFAULT);
+      socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
           HdfsConstants.READ_TIMEOUT);
       /** dfs.write.packet.size is an internal config variable */
-      writePacketSize = conf.getInt(
-          DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
-          DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
-      defaultBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+      writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
+          DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
+      defaultBlockSize = conf.getLong(DFS_BLOCK_SIZE_KEY,
           DEFAULT_BLOCK_SIZE);
       defaultReplication = (short) conf.getInt(
-          DFSConfigKeys.DFS_REPLICATION_KEY,
-          DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+          DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
       taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
-      socketCacheCapacity = conf.getInt(
-          DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
-          DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
-      prefetchSize = conf.getLong(
-          DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
+      socketCacheCapacity = conf.getInt(DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
+          DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
+      prefetchSize = conf.getLong(DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
           10 * defaultBlockSize);
       timeWindow = conf
-          .getInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 3000);
-      nCachedConnRetry = conf.getInt(
-          DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY,
-          DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
-      nBlockWriteRetry = conf.getInt(
-          DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY,
-          DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT);
+          .getInt(DFS_CLIENT_RETRY_WINDOW_BASE, 3000);
+      nCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
+          DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
+      nBlockWriteRetry = conf.getInt(DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY,
+          DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT);
       nBlockWriteLocateFollowingRetry = conf
-          .getInt(
-              DFSConfigKeys
-              .DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
-              DFSConfigKeys
-              .DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
+          .getInt(DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
+              DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
       uMask = FsPermission.getUMask(conf);
     }
   }
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6879bb7..88f5a9b 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -42,9 +42,11 @@
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
+import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.server.blockmanagement.UnderReplicatedBlocks.BlockIterator;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
@@ -53,6 +55,8 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.Daemon;
 
@@ -156,7 +160,7 @@
   public final int defaultReplication;
   /** The maximum number of entries returned by getCorruptInodes() */
   final int maxCorruptFilesReturned;
-  
+
   /** variable to enable check for enough racks */
   final boolean shouldCheckForEnoughRacks;
 
@@ -208,12 +212,12 @@
     this.replicationRecheckInterval = 
       conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
                   DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
-    FSNamesystem.LOG.info("defaultReplication = " + defaultReplication);
-    FSNamesystem.LOG.info("maxReplication = " + maxReplication);
-    FSNamesystem.LOG.info("minReplication = " + minReplication);
-    FSNamesystem.LOG.info("maxReplicationStreams = " + maxReplicationStreams);
-    FSNamesystem.LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
-    FSNamesystem.LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
+    LOG.info("defaultReplication = " + defaultReplication);
+    LOG.info("maxReplication     = " + maxReplication);
+    LOG.info("minReplication     = " + minReplication);
+    LOG.info("maxReplicationStreams      = " + maxReplicationStreams);
+    LOG.info("shouldCheckForEnoughRacks  = " + shouldCheckForEnoughRacks);
+    LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
   }
 
   public void activate(Configuration conf) {
@@ -294,15 +298,14 @@
       }
     }
 
-    //
     // Dump blocks from pendingReplication
-    //
     pendingReplications.metaSave(out);
 
-    //
     // Dump blocks that are waiting to be deleted
-    //
     dumpRecentInvalidateSets(out);
+
+    // Dump all datanodes
+    getDatanodeManager().datanodeDump(out);
   }
 
   /**
@@ -341,9 +344,7 @@
         namesystem.dir.updateSpaceConsumed(path, 0, -diff
             * fileINode.getReplication());
       } catch (IOException e) {
-        FSNamesystem.LOG
-            .warn("Unexpected exception while updating disk space : "
-                + e.getMessage());
+        LOG.warn("Unexpected exception while updating disk space.", e);
       }
     }
   }
@@ -453,7 +454,7 @@
   /**
    * Get all valid locations of the block
    */
-  public ArrayList<String> getValidLocations(Block block) {
+  private List<String> getValidLocations(Block block) {
     ArrayList<String> machineSet =
       new ArrayList<String>(blocksMap.numNodes(block));
     for(Iterator<DatanodeDescriptor> it =
@@ -514,7 +515,7 @@
     final int numCorruptNodes = countNodes(blk).corruptReplicas();
     final int numCorruptReplicas = corruptReplicas.numCorruptReplicas(blk);
     if (numCorruptNodes != numCorruptReplicas) {
-      FSNamesystem.LOG.warn("Inconsistent number of corrupt replicas for "
+      LOG.warn("Inconsistent number of corrupt replicas for "
           + blk + " blockMap has " + numCorruptNodes
           + " but corrupt replicas map has " + numCorruptReplicas);
     }
@@ -562,6 +563,49 @@
                             minReplication);
   }
 
+   /** Get all blocks with location information from a datanode. */
+  public BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
+      final long size) throws UnregisteredNodeException {
+    final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
+    if (node == null) {
+      NameNode.stateChangeLog.warn("BLOCK* NameSystem.getBlocks: "
+          + "Asking for blocks from an unrecorded node " + datanode.getName());
+      throw new HadoopIllegalArgumentException(
+          "Datanode " + datanode.getName() + " not found.");
+    }
+
+    int numBlocks = node.numBlocks();
+    if(numBlocks == 0) {
+      return new BlocksWithLocations(new BlockWithLocations[0]);
+    }
+    Iterator<BlockInfo> iter = node.getBlockIterator();
+    int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
+    // skip blocks
+    for(int i=0; i<startBlock; i++) {
+      iter.next();
+    }
+    List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
+    long totalSize = 0;
+    BlockInfo curBlock;
+    while(totalSize<size && iter.hasNext()) {
+      curBlock = iter.next();
+      if(!curBlock.isComplete())  continue;
+      totalSize += addBlock(curBlock, results);
+    }
+    if(totalSize<size) {
+      iter = node.getBlockIterator(); // start from the beginning
+      for(int i=0; i<startBlock&&totalSize<size; i++) {
+        curBlock = iter.next();
+        if(!curBlock.isComplete())  continue;
+        totalSize += addBlock(curBlock, results);
+      }
+    }
+
+    return new BlocksWithLocations(
+        results.toArray(new BlockWithLocations[results.size()]));
+  }
+
+   
   /** Remove a datanode. */
   public void removeDatanode(final DatanodeDescriptor node) {
     final Iterator<? extends Block> it = node.getBlockIterator();
@@ -660,7 +704,7 @@
     for(Map.Entry<String,Collection<Block>> entry : recentInvalidateSets.entrySet()) {
       Collection<Block> blocks = entry.getValue();
       if (blocks.size() > 0) {
-        out.println(namesystem.getDatanode(entry.getKey()).getName() + blocks);
+        out.println(datanodeManager.getDatanode(entry.getKey()).getName() + blocks);
       }
     }
   }
@@ -684,7 +728,7 @@
   private void markBlockAsCorrupt(BlockInfo storedBlock,
                                   DatanodeInfo dn) throws IOException {
     assert storedBlock != null : "storedBlock should not be null";
-    DatanodeDescriptor node = namesystem.getDatanode(dn);
+    DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     if (node == null) {
       throw new IOException("Cannot mark block " + 
                             storedBlock.getBlockName() +
@@ -723,7 +767,7 @@
       throws IOException {
     NameNode.stateChangeLog.info("DIR* NameSystem.invalidateBlock: "
                                  + blk + " on " + dn.getName());
-    DatanodeDescriptor node = namesystem.getDatanode(dn);
+    DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     if (node == null) {
       throw new IOException("Cannot invalidate block " + blk +
                             " because datanode " + dn.getName() +
@@ -748,7 +792,7 @@
     }
   }
 
-  public void updateState() {
+  void updateState() {
     pendingReplicationBlocksCount = pendingReplications.size();
     underReplicatedBlocksCount = neededReplications.size();
     corruptReplicaBlocksCount = corruptReplicas.size();
@@ -869,7 +913,7 @@
           Block block = neededReplicationsIterator.next();
           int priority = neededReplicationsIterator.getPriority();
           if (priority < 0 || priority >= blocksToReplicate.size()) {
-            FSNamesystem.LOG.warn("Unexpected replication priority: "
+            LOG.warn("Unexpected replication priority: "
                 + priority + " " + block);
           } else {
             blocksToReplicate.get(priority).add(block);
@@ -1134,7 +1178,7 @@
    * If there were any replication requests that timed out, reap them
    * and put them back into the neededReplication queue
    */
-  public void processPendingReplications() {
+  private void processPendingReplications() {
     Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
     if (timedOutItems != null) {
       namesystem.writeLock();
@@ -1338,8 +1382,8 @@
       Collection<BlockInfo> toCorrupt,
       Collection<StatefulBlockInfo> toUC) {
     
-    if(FSNamesystem.LOG.isDebugEnabled()) {
-      FSNamesystem.LOG.debug("Reported block " + block
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("Reported block " + block
           + " on " + dn.getName() + " size " + block.getNumBytes()
           + " replicaState = " + reportedState);
     }
@@ -1355,8 +1399,8 @@
     BlockUCState ucState = storedBlock.getBlockUCState();
     
     // Block is on the NN
-    if(FSNamesystem.LOG.isDebugEnabled()) {
-      FSNamesystem.LOG.debug("In memory blockUCState = " + ucState);
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("In memory blockUCState = " + ucState);
     }
 
     // Ignore replicas already scheduled to be removed from the DN
@@ -1411,7 +1455,7 @@
     case RUR:       // should not be reported
     case TEMPORARY: // should not be reported
     default:
-      FSNamesystem.LOG.warn("Unexpected replica state " + reportedState
+      LOG.warn("Unexpected replica state " + reportedState
           + " for block: " + storedBlock + 
           " on " + dn.getName() + " size " + storedBlock.getNumBytes());
       return true;
@@ -1579,7 +1623,7 @@
     int corruptReplicasCount = corruptReplicas.numCorruptReplicas(storedBlock);
     int numCorruptNodes = num.corruptReplicas();
     if (numCorruptNodes != corruptReplicasCount) {
-      FSNamesystem.LOG.warn("Inconsistent number of corrupt replicas for " +
+      LOG.warn("Inconsistent number of corrupt replicas for " +
           storedBlock + "blockMap has " + numCorruptNodes + 
           " but corrupt replicas map has " + corruptReplicasCount);
     }
@@ -1662,10 +1706,10 @@
     } finally {
       namesystem.writeUnlock();
     }
-    FSNamesystem.LOG.info("Total number of blocks = " + blocksMap.size());
-    FSNamesystem.LOG.info("Number of invalid blocks = " + nrInvalid);
-    FSNamesystem.LOG.info("Number of under-replicated blocks = " + nrUnderReplicated);
-    FSNamesystem.LOG.info("Number of  over-replicated blocks = " + nrOverReplicated);
+    LOG.info("Total number of blocks            = " + blocksMap.size());
+    LOG.info("Number of invalid blocks          = " + nrInvalid);
+    LOG.info("Number of under-replicated blocks = " + nrUnderReplicated);
+    LOG.info("Number of  over-replicated blocks = " + nrOverReplicated);
   }
 
   /**
@@ -1700,6 +1744,7 @@
         addedNode, delNodeHint, blockplacement);
   }
 
+
   public void addToExcessReplicate(DatanodeInfo dn, Block block) {
     assert namesystem.hasWriteLock();
     Collection<Block> excessBlocks = excessReplicateMap.get(dn.getStorageID());
@@ -1774,6 +1819,21 @@
   }
 
   /**
+   * Get all valid locations of the block & add the block to results
+   * return the length of the added block; 0 if the block is not added
+   */
+  private long addBlock(Block block, List<BlockWithLocations> results) {
+    final List<String> machineSet = getValidLocations(block);
+    if(machineSet.size() == 0) {
+      return 0;
+    } else {
+      results.add(new BlockWithLocations(block, 
+          machineSet.toArray(new String[machineSet.size()])));
+      return block.getNumBytes();
+    }
+  }
+
+  /**
    * The given node is reporting that it received a certain block.
    */
   public void addBlock(DatanodeDescriptor node, Block block, String delHint)
@@ -1784,7 +1844,7 @@
     // get the deletion hint node
     DatanodeDescriptor delHintNode = null;
     if (delHint != null && delHint.length() != 0) {
-      delHintNode = namesystem.getDatanode(delHint);
+      delHintNode = datanodeManager.getDatanode(delHint);
       if (delHintNode == null) {
         NameNode.stateChangeLog.warn("BLOCK* NameSystem.blockReceived: "
             + block + " is expected to be removed from an unrecorded node "
@@ -1893,7 +1953,7 @@
       nodeList.append(node.name);
       nodeList.append(" ");
     }
-    FSNamesystem.LOG.info("Block: " + block + ", Expected Replicas: "
+    LOG.info("Block: " + block + ", Expected Replicas: "
         + curExpectedReplicas + ", live replicas: " + curReplicas
         + ", corrupt replicas: " + num.corruptReplicas()
         + ", decommissioned replicas: " + num.decommissionedReplicas()
@@ -2071,7 +2131,7 @@
         return 0;
       // get blocks to invalidate for the nodeId
       assert nodeId != null;
-      DatanodeDescriptor dn = namesystem.getDatanode(nodeId);
+      final DatanodeDescriptor dn = datanodeManager.getDatanode(nodeId);
       if (dn == null) {
         removeFromInvalidates(nodeId);
         return 0;
@@ -2082,11 +2142,11 @@
         return 0;
 
       ArrayList<Block> blocksToInvalidate = new ArrayList<Block>(
-          namesystem.blockInvalidateLimit);
+          getDatanodeManager().blockInvalidateLimit);
 
       // # blocks that can be sent in one message is limited
       Iterator<Block> it = invalidateSet.iterator();
-      for (int blkCount = 0; blkCount < namesystem.blockInvalidateLimit
+      for (int blkCount = 0; blkCount < getDatanodeManager().blockInvalidateLimit
           && it.hasNext(); blkCount++) {
         blocksToInvalidate.add(it.next());
         it.remove();
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index fe2eb3e..e1e8c9c 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -22,6 +22,8 @@
 import java.util.HashMap;
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -38,7 +40,8 @@
  */
 @InterfaceAudience.Private
 public abstract class BlockPlacementPolicy {
-    
+  static final Log LOG = LogFactory.getLog(BlockPlacementPolicy.class);
+
   @InterfaceAudience.Private
   public static class NotEnoughReplicasException extends Exception {
     private static final long serialVersionUID = 1L;
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index bac1e42..c5f8b9c 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -212,7 +212,7 @@
       chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, 
                    blocksize, maxNodesPerRack, results);
     } catch (NotEnoughReplicasException e) {
-      FSNamesystem.LOG.warn("Not able to place enough replicas, still in need of "
+      LOG.warn("Not able to place enough replicas, still in need of "
                + numOfReplicas + " to reach " + totalReplicasExpected + "\n"
                + e.getMessage());
     }
@@ -343,7 +343,7 @@
     int numOfAvailableNodes =
       clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet());
     StringBuilder builder = null;
-    if (FSNamesystem.LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       builder = threadLocalBuilder.get();
       builder.setLength(0);
       builder.append("[");
@@ -366,7 +366,7 @@
     }
 
     String detail = enableDebugLogging;
-    if (FSNamesystem.LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       if (badTarget && builder != null) {
         detail = builder.append("]").toString();
         builder.setLength(0);
@@ -388,7 +388,7 @@
     int numOfAvailableNodes =
       clusterMap.countNumOfAvailableNodes(nodes, excludedNodes.keySet());
     StringBuilder builder = null;
-    if (FSNamesystem.LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       builder = threadLocalBuilder.get();
       builder.setLength(0);
       builder.append("[");
@@ -412,7 +412,7 @@
       
     if (numOfReplicas>0) {
       String detail = enableDebugLogging;
-      if (FSNamesystem.LOG.isDebugEnabled()) {
+      if (LOG.isDebugEnabled()) {
         if (badTarget && builder != null) {
           detail = builder.append("]").toString();
           builder.setLength(0);
@@ -439,7 +439,7 @@
                                List<DatanodeDescriptor> results) {
     // check if the node is (being) decommissed
     if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-      if(FSNamesystem.LOG.isDebugEnabled()) {
+      if(LOG.isDebugEnabled()) {
         threadLocalBuilder.get().append(node.toString()).append(": ")
           .append("Node ").append(NodeBase.getPath(node))
           .append(" is not chosen because the node is (being) decommissioned ");
@@ -451,7 +451,7 @@
                      (node.getBlocksScheduled() * blockSize); 
     // check the remaining capacity of the target machine
     if (blockSize* FSConstants.MIN_BLOCKS_FOR_WRITE>remaining) {
-      if(FSNamesystem.LOG.isDebugEnabled()) {
+      if(LOG.isDebugEnabled()) {
         threadLocalBuilder.get().append(node.toString()).append(": ")
           .append("Node ").append(NodeBase.getPath(node))
           .append(" is not chosen because the node does not have enough space ");
@@ -467,7 +467,7 @@
         avgLoad = (double)stats.getTotalLoad()/size;
       }
       if (node.getXceiverCount() > (2.0 * avgLoad)) {
-        if(FSNamesystem.LOG.isDebugEnabled()) {
+        if(LOG.isDebugEnabled()) {
           threadLocalBuilder.get().append(node.toString()).append(": ")
             .append("Node ").append(NodeBase.getPath(node))
             .append(" is not chosen because the node is too busy ");
@@ -487,7 +487,7 @@
       }
     }
     if (counter>maxTargetPerLoc) {
-      if(FSNamesystem.LOG.isDebugEnabled()) {
+      if(LOG.isDebugEnabled()) {
         threadLocalBuilder.get().append(node.toString()).append(": ")
           .append("Node ").append(NodeBase.getPath(node))
           .append(" is not chosen because the rack has too many chosen nodes ");
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index db0132f..3d7a0fd 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -19,16 +19,22 @@
 
 import java.io.DataInput;
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import java.util.Set;
+import java.util.TreeSet;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.WritableUtils;
 
 /**************************************************
@@ -326,7 +332,7 @@
   void addBlockToBeRecovered(BlockInfoUnderConstruction block) {
     if(recoverBlocks.contains(block)) {
       // this prevents adding the same block twice to the recovery queue
-      FSNamesystem.LOG.info("Block " + block +
+      BlockManager.LOG.info("Block " + block +
                             " is already in the recovery queue.");
       return;
     }
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 5540f1f..2258769 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
+import java.io.PrintWriter;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
@@ -25,7 +26,9 @@
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
+import java.util.NavigableMap;
 import java.util.Set;
+import java.util.TreeMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -35,12 +38,22 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
 import org.apache.hadoop.ipc.Server;
@@ -48,6 +61,7 @@
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.ScriptBasedMapping;
+import org.apache.hadoop.util.CyclicIteration;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.HostsFileReader;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -62,6 +76,30 @@
 
   final FSNamesystem namesystem;
 
+  /**
+   * Stores the datanode -> block map.  
+   * <p>
+   * Done by storing a set of {@link DatanodeDescriptor} objects, sorted by 
+   * storage id. In order to keep the storage map consistent it tracks 
+   * all storages ever registered with the namenode.
+   * A descriptor corresponding to a specific storage id can be
+   * <ul> 
+   * <li>added to the map if it is a new storage id;</li>
+   * <li>updated with a new datanode started as a replacement for the old one 
+   * with the same storage id; and </li>
+   * <li>removed if and only if an existing datanode is restarted to serve a
+   * different storage id.</li>
+   * </ul> <br>
+   * The list of the {@link DatanodeDescriptor}s in the map is checkpointed
+   * in the namespace image file. Only the {@link DatanodeInfo} part is 
+   * persistent, the list of blocks is restored from the datanode block
+   * reports. 
+   * <p>
+   * Mapping: StorageID -> DatanodeDescriptor
+   */
+  private final NavigableMap<String, DatanodeDescriptor> datanodeMap
+      = new TreeMap<String, DatanodeDescriptor>();
+
   /** Cluster network topology */
   private final NetworkTopology networktopology = new NetworkTopology();
 
@@ -71,7 +109,12 @@
   private final DNSToSwitchMapping dnsToSwitchMapping;
 
   /** Read include/exclude files*/
-  private final HostsFileReader hostsReader; 
+  private final HostsFileReader hostsReader;
+
+  /** The period to wait for datanode heartbeat.*/
+  private final long heartbeatExpireInterval;
+  /** Ask Datanode only up to this many blocks to delete. */
+  final int blockInvalidateLimit;
   
   DatanodeManager(final FSNamesystem namesystem, final Configuration conf
       ) throws IOException {
@@ -90,6 +133,19 @@
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
       dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts()));
     }
+    
+    final long heartbeatIntervalSeconds = conf.getLong(
+        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
+    final int heartbeatRecheckInterval = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
+        DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
+    this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
+        + 10 * 1000 * heartbeatIntervalSeconds;
+    this.blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds),
+        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
+    LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
+        + "=" + this.blockInvalidateLimit);
   }
 
   private Daemon decommissionthread = null;
@@ -124,20 +180,88 @@
       Arrays.sort(b.getLocations(), DFSUtil.DECOM_COMPARATOR);
     }    
   }
-  
+
+  CyclicIteration<String, DatanodeDescriptor> getDatanodeCyclicIteration(
+      final String firstkey) {
+    return new CyclicIteration<String, DatanodeDescriptor>(
+        datanodeMap, firstkey);
+  }
+
   /** @return the datanode descriptor for the host. */
   public DatanodeDescriptor getDatanodeByHost(final String host) {
     return host2DatanodeMap.getDatanodeByHost(host);
   }
 
+  /** Get a datanode descriptor given corresponding storageID */
+  DatanodeDescriptor getDatanode(final String storageID) {
+    return datanodeMap.get(storageID);
+  }
+
+  /**
+   * Get data node by storage ID.
+   * 
+   * @param nodeID
+   * @return DatanodeDescriptor or null if the node is not found.
+   * @throws UnregisteredNodeException
+   */
+  public DatanodeDescriptor getDatanode(DatanodeID nodeID
+      ) throws UnregisteredNodeException {
+    final DatanodeDescriptor node = getDatanode(nodeID.getStorageID());
+    if (node == null) 
+      return null;
+    if (!node.getName().equals(nodeID.getName())) {
+      final UnregisteredNodeException e = new UnregisteredNodeException(
+          nodeID, node);
+      NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
+                                    + e.getLocalizedMessage());
+      throw e;
+    }
+    return node;
+  }
+
+  /** Prints information about all datanodes. */
+  void datanodeDump(final PrintWriter out) {
+    synchronized (datanodeMap) {
+      out.println("Metasave: Number of datanodes: " + datanodeMap.size());
+      for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) {
+        DatanodeDescriptor node = it.next();
+        out.println(node.dumpDatanode());
+      }
+    }
+  }
+
+  /** Remove a dead datanode. */
+  public void removeDeadDatanode(final DatanodeID nodeID) {
+    synchronized(namesystem.heartbeats) {
+      synchronized(datanodeMap) {
+        DatanodeDescriptor d;
+        try {
+          d = getDatanode(nodeID);
+        } catch(IOException e) {
+          d = null;
+        }
+        if (d != null && isDatanodeDead(d)) {
+          NameNode.stateChangeLog.info(
+              "BLOCK* removeDeadDatanode: lost heartbeat from " + d.getName());
+          namesystem.removeDatanode(d);
+        }
+      }
+    }
+  }
+
+  /** Is the datanode dead? */
+  public boolean isDatanodeDead(DatanodeDescriptor node) {
+    return (node.getLastUpdate() <
+            (Util.now() - heartbeatExpireInterval));
+  }
+
   /** Add a datanode. */
   private void addDatanode(final DatanodeDescriptor node) {
     // To keep host2DatanodeMap consistent with datanodeMap,
     // remove  from host2DatanodeMap the datanodeDescriptor removed
     // from datanodeMap before adding node to host2DatanodeMap.
-    synchronized (namesystem.datanodeMap) {
-      host2DatanodeMap.remove(
-          namesystem.datanodeMap.put(node.getStorageID(), node));
+    synchronized(datanodeMap) {
+      host2DatanodeMap.remove(datanodeMap.put(node.getStorageID(), node));
     }
 
     host2DatanodeMap.add(node);
@@ -152,8 +276,8 @@
   /** Physically remove node from datanodeMap. */
   private void wipeDatanode(final DatanodeID node) throws IOException {
     final String key = node.getStorageID();
-    synchronized (namesystem.datanodeMap) {
-      host2DatanodeMap.remove(namesystem.datanodeMap.remove(key));
+    synchronized (datanodeMap) {
+      host2DatanodeMap.remove(datanodeMap.remove(key));
     }
     if (LOG.isDebugEnabled()) {
       LOG.debug(getClass().getSimpleName() + ".wipeDatanode("
@@ -315,7 +439,7 @@
     String newID = null;
     while(newID == null) {
       newID = "DS" + Integer.toString(DFSUtil.getRandom().nextInt());
-      if (namesystem.datanodeMap.get(newID) != null)
+      if (datanodeMap.get(newID) != null)
         newID = null;
     }
     return newID;
@@ -350,7 +474,7 @@
         + "node registration from " + nodeReg.getName()
         + " storage " + nodeReg.getStorageID());
 
-    DatanodeDescriptor nodeS = namesystem.datanodeMap.get(nodeReg.getStorageID());
+    DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
     DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getName());
       
     if (nodeN != null && nodeN != nodeS) {
@@ -461,7 +585,7 @@
    * 4. Removed from exclude --> stop decommission.
    */
   public void refreshDatanodes() throws IOException {
-    for(DatanodeDescriptor node : namesystem.datanodeMap.values()) {
+    for(DatanodeDescriptor node : datanodeMap.values()) {
       // Check if not include.
       if (!inHostsList(node, null)) {
         node.setDisallowed(true);  // case 2.
@@ -475,6 +599,45 @@
     }
   }
 
+  /** @return the number of live datanodes. */
+  public int getNumLiveDataNodes() {
+    int numLive = 0;
+    synchronized (datanodeMap) {
+      for(DatanodeDescriptor dn : datanodeMap.values()) {
+        if (!isDatanodeDead(dn) ) {
+          numLive++;
+        }
+      }
+    }
+    return numLive;
+  }
+
+  /** @return the number of dead datanodes. */
+  public int getNumDeadDataNodes() {
+    int numDead = 0;
+    synchronized (datanodeMap) {   
+      for(DatanodeDescriptor dn : datanodeMap.values()) {
+        if (isDatanodeDead(dn) ) {
+          numDead++;
+        }
+      }
+    }
+    return numDead;
+  }
+
+  /** Fetch live and dead datanodes. */
+  public void fetchDatanodess(final List<DatanodeDescriptor> live, 
+      final List<DatanodeDescriptor> dead) {
+    final List<DatanodeDescriptor> results =
+        getDatanodeListForReport(DatanodeReportType.ALL);    
+    for(DatanodeDescriptor node : results) {
+      if (isDatanodeDead(node))
+        dead.add(node);
+      else
+        live.add(node);
+    }
+  }
+
   /** For generating datanode reports */
   public List<DatanodeDescriptor> getDatanodeListForReport(
       final DatanodeReportType type) {
@@ -499,13 +662,13 @@
 
     ArrayList<DatanodeDescriptor> nodes = null;
     
-    synchronized (namesystem.datanodeMap) {
-      nodes = new ArrayList<DatanodeDescriptor>(namesystem.datanodeMap.size() + 
+    synchronized(datanodeMap) {
+      nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size() + 
                                                 mustList.size());
-      Iterator<DatanodeDescriptor> it = namesystem.datanodeMap.values().iterator();
+      Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator();
       while (it.hasNext()) { 
         DatanodeDescriptor dn = it.next();
-        boolean isDead = namesystem.isDatanodeDead(dn);
+        final boolean isDead = isDatanodeDead(dn);
         if ( (isDead && listDeadNodes) || (!isDead && listLiveNodes) ) {
           nodes.add(dn);
         }
@@ -537,4 +700,77 @@
     }
     return nodes;
   }
+  
+  private void setDatanodeDead(DatanodeDescriptor node) throws IOException {
+    node.setLastUpdate(0);
+  }
+
+  /** Handle heartbeat from datanodes. */
+  public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
+      final String blockPoolId,
+      long capacity, long dfsUsed, long remaining, long blockPoolUsed,
+      int xceiverCount, int maxTransfers, int failedVolumes
+      ) throws IOException {
+    synchronized (namesystem.heartbeats) {
+      synchronized (datanodeMap) {
+        DatanodeDescriptor nodeinfo = null;
+        try {
+          nodeinfo = getDatanode(nodeReg);
+        } catch(UnregisteredNodeException e) {
+          return new DatanodeCommand[]{DatanodeCommand.REGISTER};
+        }
+        
+        // Check if this datanode should actually be shut down instead.
+        if (nodeinfo != null && nodeinfo.isDisallowed()) {
+          setDatanodeDead(nodeinfo);
+          throw new DisallowedDatanodeException(nodeinfo);
+        }
+         
+        if (nodeinfo == null || !nodeinfo.isAlive) {
+          return new DatanodeCommand[]{DatanodeCommand.REGISTER};
+        }
+
+        namesystem.updateStats(nodeinfo, false);
+        nodeinfo.updateHeartbeat(capacity, dfsUsed, remaining, blockPoolUsed,
+            xceiverCount, failedVolumes);
+        namesystem.updateStats(nodeinfo, true);
+        
+        //check lease recovery
+        BlockInfoUnderConstruction[] blocks = nodeinfo
+            .getLeaseRecoveryCommand(Integer.MAX_VALUE);
+        if (blocks != null) {
+          BlockRecoveryCommand brCommand = new BlockRecoveryCommand(
+              blocks.length);
+          for (BlockInfoUnderConstruction b : blocks) {
+            brCommand.add(new RecoveringBlock(
+                new ExtendedBlock(blockPoolId, b), b.getExpectedLocations(), b
+                    .getBlockRecoveryId()));
+          }
+          return new DatanodeCommand[] { brCommand };
+        }
+
+        final List<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>(3);
+        //check pending replication
+        List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
+              maxTransfers);
+        if (pendingList != null) {
+          cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
+              pendingList));
+        }
+        //check block invalidation
+        Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
+        if (blks != null) {
+          cmds.add(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE,
+              blockPoolId, blks));
+        }
+        
+        namesystem.addKeyUpdateCommand(cmds, nodeinfo);
+        if (!cmds.isEmpty()) {
+          return cmds.toArray(new DatanodeCommand[cmds.size()]);
+        }
+      }
+    }
+
+    return null;
+  }
 }
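
For reference, the heartbeat-expiry logic that moves into DatanodeManager above reduces to a single window computed from two configuration values. A minimal standalone sketch (not part of the patch), using the documented defaults (3-second heartbeat, 5-minute recheck) in place of the DFSConfigKeys lookups and System.currentTimeMillis() in place of Util.now():

    // Sketch only, not part of the patch.
    class HeartbeatExpirySketch {
      static boolean isDatanodeDead(long lastUpdateMs) {
        final long heartbeatIntervalSeconds = 3;               // dfs.heartbeat.interval default
        final long heartbeatRecheckIntervalMs = 5 * 60 * 1000; // namenode recheck, 5 minutes
        final long heartbeatExpireIntervalMs =
            2 * heartbeatRecheckIntervalMs + 10 * 1000 * heartbeatIntervalSeconds;
        return lastUpdateMs < System.currentTimeMillis() - heartbeatExpireIntervalMs;
      }
    }

A node therefore has to miss roughly ten heartbeat intervals plus two recheck intervals before removeDeadDatanode() takes it out of the map.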
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 4e88e4d..8275dec 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -83,8 +83,8 @@
     private void check() {
       int count = 0;
       for(Map.Entry<String, DatanodeDescriptor> entry
-          : new CyclicIteration<String, DatanodeDescriptor>(
-              fsnamesystem.datanodeMap, firstkey)) {
+          : blockManager.getDatanodeManager().getDatanodeCyclicIteration(
+              firstkey)) {
         final DatanodeDescriptor d = entry.getValue();
         firstkey = entry.getKey();
 
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
index 01c2fe1..e07cf9b 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
@@ -17,14 +17,18 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-
 import static org.apache.hadoop.hdfs.server.common.Util.now;
-import org.apache.hadoop.util.*;
-import java.io.*;
-import java.util.*;
+
+import java.io.PrintWriter;
 import java.sql.Time;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.util.Daemon;
 
 /***************************************************
  * PendingReplicationBlocks does the bookkeeping of all
@@ -38,6 +42,8 @@
  *
  ***************************************************/
 class PendingReplicationBlocks {
+  private static final Log LOG = BlockManager.LOG;
+
   private Map<Block, PendingBlockInfo> pendingReplications;
   private ArrayList<Block> timedOutItems;
   Daemon timerThread = null;
@@ -87,9 +93,8 @@
     synchronized (pendingReplications) {
       PendingBlockInfo found = pendingReplications.get(block);
       if (found != null) {
-        if(FSNamesystem.LOG.isDebugEnabled()) {
-          FSNamesystem.LOG.debug("Removing pending replication for block" +
-              block);
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Removing pending replication for " + block);
         }
         found.decrementReplicas();
         if (found.getNumReplicas() <= 0) {
@@ -186,9 +191,8 @@
           pendingReplicationCheck();
           Thread.sleep(period);
         } catch (InterruptedException ie) {
-          if(FSNamesystem.LOG.isDebugEnabled()) {
-            FSNamesystem.LOG.debug(
-                "PendingReplicationMonitor thread received exception. " + ie);
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("PendingReplicationMonitor thread is interrupted.", ie);
           }
         }
       }
@@ -202,8 +206,8 @@
         Iterator<Map.Entry<Block, PendingBlockInfo>> iter =
                                     pendingReplications.entrySet().iterator();
         long now = now();
-        if(FSNamesystem.LOG.isDebugEnabled()) {
-          FSNamesystem.LOG.debug("PendingReplicationMonitor checking Q");
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("PendingReplicationMonitor checking Q");
         }
         while (iter.hasNext()) {
           Map.Entry<Block, PendingBlockInfo> entry = iter.next();
@@ -213,8 +217,7 @@
             synchronized (timedOutItems) {
               timedOutItems.add(block);
             }
-            FSNamesystem.LOG.warn(
-                "PendingReplicationMonitor timed out block " + block);
+            LOG.warn("PendingReplicationMonitor timed out " + block);
             iter.remove();
           }
         }
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index e96ce9e..a2b7ca6 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -43,6 +43,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -64,7 +65,7 @@
 @InterfaceAudience.Private
 public class JspHelper {
   public static final String CURRENT_CONF = "current.conf";
-  final static public String WEB_UGI_PROPERTY_NAME = "dfs.web.ugi";
+  final static public String WEB_UGI_PROPERTY_NAME = DFSConfigKeys.DFS_WEB_UGI_KEY;
   public static final String DELEGATION_PARAMETER_NAME = "delegation";
   public static final String NAMENODE_ADDRESS = "nnaddr";
   static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME +
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 05efe2c..19ad35b 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -206,7 +206,7 @@
    * One of the storage directories.
    */
   @InterfaceAudience.Private
-  public class StorageDirectory {
+  public static class StorageDirectory {
     final File root;              // root directory
     final boolean useLock;        // flag to enable storage lock
     final StorageDirType dirType; // storage dir type
@@ -247,75 +247,11 @@
      */
     public StorageDirType getStorageDirType() {
       return dirType;
-    }
-    
-    /**
-     * Read version file.
-     * 
-     * @throws IOException if file cannot be read or contains inconsistent data
-     */
-    public void read() throws IOException {
-      read(getVersionFile());
-    }
-    public void read(File from) throws IOException {
-      Properties props = readFrom(from);
-      getFields(props, this);
-    }
-    
-    public Properties readFrom(File from) throws IOException {
-      RandomAccessFile file = new RandomAccessFile(from, "rws");
-      FileInputStream in = null;
-      Properties props = new Properties();
-      try {
-        in = new FileInputStream(file.getFD());
-        file.seek(0);
-        props.load(in);
-      } finally {
-        if (in != null) {
-          in.close();
-        }
-        file.close();
-      }
-      return props;
-    }
+    }    
 
-    /**
-     * Write version file.
-     * 
-     * @throws IOException
-     */
-    public void write() throws IOException {
-      write(getVersionFile());
-    }
-
-    public void write(File to) throws IOException {
-      Properties props = new Properties();
-      setFields(props, this);
-      RandomAccessFile file = new RandomAccessFile(to, "rws");
-      FileOutputStream out = null;
-      try {
-        file.seek(0);
-        out = new FileOutputStream(file.getFD());
-        /*
-         * If server is interrupted before this line, 
-         * the version file will remain unchanged.
-         */
-        props.store(out, null);
-        /*
-         * Now the new fields are flushed to the head of the file, but file 
-         * length can still be larger then required and therefore the file can 
-         * contain whole or corrupted fields from its old contents in the end.
-         * If server is interrupted here and restarted later these extra fields
-         * either should not effect server behavior or should be handled
-         * by the server correctly.
-         */
-        file.setLength(out.getChannel().position());
-      } finally {
-        if (out != null) {
-          out.close();
-        }
-        file.close();
-      }
+    public void read(File from, Storage storage) throws IOException {
+      Properties props = readPropertiesFile(from);
+      storage.setFieldsFromProperties(props, this);
     }
 
     /**
@@ -467,7 +403,8 @@
      * consistent and cannot be recovered.
      * @throws IOException
      */
-    public StorageState analyzeStorage(StartupOption startOpt) throws IOException {
+    public StorageState analyzeStorage(StartupOption startOpt, Storage storage)
+        throws IOException {
       assert root != null : "root is null";
       String rootPath = root.getCanonicalPath();
       try { // check that storage exists
@@ -499,8 +436,9 @@
 
       if (startOpt == HdfsConstants.StartupOption.FORMAT)
         return StorageState.NOT_FORMATTED;
+
       if (startOpt != HdfsConstants.StartupOption.IMPORT) {
-        checkOldLayoutStorage(this);
+        storage.checkOldLayoutStorage(this);
       }
 
       // check whether current directory is valid
@@ -807,9 +745,8 @@
    * @param props
    * @throws IOException
    */
-  protected void getFields(Properties props, 
-                           StorageDirectory sd 
-                           ) throws IOException {
+  protected void setFieldsFromProperties(
+      Properties props, StorageDirectory sd) throws IOException {
     setLayoutVersion(props, sd);
     setNamespaceID(props, sd);
     setStorageType(props, sd);
@@ -818,15 +755,14 @@
   }
   
   /**
-   * Set common storage fields.
+   * Set common storage fields into the given properties object.
    * Should be overloaded if additional fields need to be set.
    * 
-   * @param props
-   * @throws IOException
+   * @param props the Properties object to write into
    */
-  protected void setFields(Properties props, 
-                           StorageDirectory sd 
-                           ) throws IOException {
+  protected void setPropertiesFromFields(Properties props, 
+                                         StorageDirectory sd)
+      throws IOException {
     props.setProperty("layoutVersion", String.valueOf(layoutVersion));
     props.setProperty("storageType", storageType.toString());
     props.setProperty("namespaceID", String.valueOf(namespaceID));
@@ -837,6 +773,77 @@
     props.setProperty("cTime", String.valueOf(cTime));
   }
 
+  /**
+   * Read properties from the VERSION file in the given storage directory.
+   */
+  public void readProperties(StorageDirectory sd) throws IOException {
+    Properties props = readPropertiesFile(sd.getVersionFile());
+    setFieldsFromProperties(props, sd);
+  }
+
+  /**
+   * Read properties from the previous/VERSION file in the given storage directory.
+   */
+  public void readPreviousVersionProperties(StorageDirectory sd)
+      throws IOException {
+    Properties props = readPropertiesFile(sd.getPreviousVersionFile());
+    setFieldsFromProperties(props, sd);
+  }
+
+  /**
+   * Write properties to the VERSION file in the given storage directory.
+   */
+  public void writeProperties(StorageDirectory sd) throws IOException {
+    writeProperties(sd.getVersionFile(), sd);
+  }
+
+  public void writeProperties(File to, StorageDirectory sd) throws IOException {
+    Properties props = new Properties();
+    setPropertiesFromFields(props, sd);
+    RandomAccessFile file = new RandomAccessFile(to, "rws");
+    FileOutputStream out = null;
+    try {
+      file.seek(0);
+      out = new FileOutputStream(file.getFD());
+      /*
+       * If the server is interrupted before this line,
+       * the version file will remain unchanged.
+       */
+      props.store(out, null);
+      /*
+       * Now the new fields are flushed to the head of the file, but the file
+       * length can still be larger than required, so the end of the file can
+       * contain whole or corrupted fields left over from its old contents.
+       * If the server is interrupted here and restarted later, these extra
+       * fields should either not affect server behavior or be handled
+       * by the server correctly.
+       */
+      file.setLength(out.getChannel().position());
+    } finally {
+      if (out != null) {
+        out.close();
+      }
+      file.close();
+    }
+  }
+  
+  public static Properties readPropertiesFile(File from) throws IOException {
+    RandomAccessFile file = new RandomAccessFile(from, "rws");
+    FileInputStream in = null;
+    Properties props = new Properties();
+    try {
+      in = new FileInputStream(file.getFD());
+      file.seek(0);
+      props.load(in);
+    } finally {
+      if (in != null) {
+        in.close();
+      }
+      file.close();
+    }
+    return props;
+  }
+
   public static void rename(File from, File to) throws IOException {
     if (!from.renameTo(to))
       throw new IOException("Failed to rename " 
@@ -861,7 +868,7 @@
   public void writeAll() throws IOException {
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     for (Iterator<StorageDirectory> it = storageDirs.iterator(); it.hasNext();) {
-      it.next().write();
+      writeProperties(it.next());
     }
   }
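
With the Storage.java changes above, VERSION-file property I/O now lives on the Storage object instead of the (now static) StorageDirectory. A minimal sketch of the resulting call pattern, assuming an already initialized Storage subclass and one of its directories:

    // Sketch only, not part of the patch.
    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.common.Storage;
    import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;

    class VersionFileSketch {
      static void rewriteVersionFile(Storage storage, StorageDirectory sd)
          throws IOException {
        storage.readProperties(sd);   // was: sd.read()
        // ... adjust fields such as layoutVersion or cTime on the Storage instance ...
        storage.writeProperties(sd);  // was: sd.write()
      }
    }

This is the same shape the datanode and backup-node hunks below follow when they swap sd.read()/sd.write() for readProperties(sd)/writeProperties(sd).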
 
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index e66d7db..b547701b 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -102,7 +102,7 @@
       StorageDirectory sd = new StorageDirectory(dataDir, null, false);
       StorageState curState;
       try {
-        curState = sd.analyzeStorage(startOpt);
+        curState = sd.analyzeStorage(startOpt, this);
         // sd is locked but not opened
         switch (curState) {
         case NORMAL:
@@ -176,7 +176,7 @@
     this.namespaceID = nsInfo.getNamespaceID();
     this.blockpoolID = nsInfo.getBlockPoolID();
     this.storageType = NodeType.DATA_NODE;
-    bpSdir.write();
+    writeProperties(bpSdir);
   }
 
   /**
@@ -184,7 +184,7 @@
    * VERSION file
    */
   @Override
-  protected void setFields(Properties props, StorageDirectory sd)
+  protected void setPropertiesFromFields(Properties props, StorageDirectory sd)
       throws IOException {
     props.setProperty("layoutVersion", String.valueOf(layoutVersion));
     props.setProperty("namespaceID", String.valueOf(namespaceID));
@@ -208,7 +208,7 @@
   }
   
   @Override
-  protected void getFields(Properties props, StorageDirectory sd)
+  protected void setFieldsFromProperties(Properties props, StorageDirectory sd)
       throws IOException {
     setLayoutVersion(props, sd);
     setNamespaceID(props, sd);
@@ -237,7 +237,7 @@
     if (startOpt == StartupOption.ROLLBACK)
       doRollback(sd, nsInfo); // rollback if applicable
     
-    sd.read();
+    readProperties(sd);
     checkVersionUpgradable(this.layoutVersion);
     assert this.layoutVersion >= FSConstants.LAYOUT_VERSION 
        : "Future version is not allowed";
@@ -331,7 +331,7 @@
     assert this.namespaceID == nsInfo.getNamespaceID() 
         : "Data-node and name-node layout versions must be the same.";
     this.cTime = nsInfo.getCTime();
-    bpSd.write();
+    writeProperties(bpSd);
     
     // 4.rename <SD>/curernt/<bpid>/previous.tmp to <SD>/curernt/<bpid>/previous
     rename(bpTmpDir, bpPrevDir);
@@ -383,8 +383,7 @@
       return;
     // read attributes out of the VERSION file of previous directory
     DataStorage prevInfo = new DataStorage();
-    StorageDirectory prevSD = prevInfo.new StorageDirectory(bpSd.getRoot());
-    prevSD.read(prevSD.getPreviousVersionFile());
+    prevInfo.readPreviousVersionProperties(bpSd);
 
     // We allow rollback to a state, which is either consistent with
     // the namespace state or can be further upgraded to it.
@@ -392,7 +391,7 @@
     // && ( DN.previousCTime <= NN.ctime)
     if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION && 
         prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback
-      throw new InconsistentFSStateException(prevSD.getRoot(),
+      throw new InconsistentFSStateException(bpSd.getRoot(),
           "Cannot rollback to a newer state.\nDatanode previous state: LV = "
               + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
               + " is newer than the namespace state: LV = "
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 66336c4..733e9b4 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -18,32 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.apache.hadoop.hdfs.server.common.Util.now;
 
 import java.io.BufferedOutputStream;
@@ -448,8 +423,11 @@
       name = config.get(DFS_DATANODE_HOST_NAME_KEY);
     }
     if (name == null) {
-      name = DNS.getDefaultHost(config.get("dfs.datanode.dns.interface",
-          "default"), config.get("dfs.datanode.dns.nameserver", "default"));
+      name = DNS
+          .getDefaultHost(config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
+              DFS_DATANODE_DNS_INTERFACE_DEFAULT), config.get(
+              DFS_DATANODE_DNS_NAMESERVER_KEY,
+              DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
     }
     return name;
   }
@@ -521,7 +499,7 @@
   }
   
   private void startPlugins(Configuration conf) {
-    plugins = conf.getInstances("dfs.datanode.plugins", ServicePlugin.class);
+    plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
     for (ServicePlugin p: plugins) {
       try {
         p.start(this);
@@ -810,8 +788,9 @@
       StartupOption startOpt = getStartupOption(conf);
       assert startOpt != null : "Startup option must be set.";
 
-      boolean simulatedFSDataset = 
-        conf.getBoolean("dfs.datanode.simulateddatastorage", false);
+      boolean simulatedFSDataset = conf.getBoolean(
+          DFS_DATANODE_SIMULATEDDATASTORAGE_KEY,
+          DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT);
       
       if (simulatedFSDataset) {
         initFsDataSet(conf, dataDirs);
@@ -1455,8 +1434,9 @@
     }
 
     // get version and id info from the name-node
-    boolean simulatedFSDataset = 
-      conf.getBoolean("dfs.datanode.simulateddatastorage", false);
+    boolean simulatedFSDataset = conf.getBoolean(
+        DFS_DATANODE_SIMULATEDDATASTORAGE_KEY,
+        DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT);
 
     if (simulatedFSDataset) {
       storage.createStorageID(getPort());
@@ -1480,8 +1460,8 @@
    * Determine the http server's effective addr
    */
   public static InetSocketAddress getInfoAddr(Configuration conf) {
-    return NetUtils.createSocketAddr(
-        conf.get("dfs.datanode.http.address", "0.0.0.0:50075"));
+    return NetUtils.createSocketAddr(conf.get(DFS_DATANODE_HTTP_ADDRESS_KEY,
+        DFS_DATANODE_HTTP_ADDRESS_DEFAULT));
   }
   
   private void registerMXBean() {
@@ -2258,11 +2238,11 @@
   }
 
   private static void setStartupOption(Configuration conf, StartupOption opt) {
-    conf.set("dfs.datanode.startup", opt.toString());
+    conf.set(DFS_DATANODE_STARTUP_KEY, opt.toString());
   }
 
   static StartupOption getStartupOption(Configuration conf) {
-    return StartupOption.valueOf(conf.get("dfs.datanode.startup",
+    return StartupOption.valueOf(conf.get(DFS_DATANODE_STARTUP_KEY,
                                           StartupOption.REGULAR.toString()));
   }
 
@@ -2661,7 +2641,7 @@
   // Determine a Datanode's streaming address
   public static InetSocketAddress getStreamingAddr(Configuration conf) {
     return NetUtils.createSocketAddr(
-        conf.get("dfs.datanode.address", "0.0.0.0:50010"));
+        conf.get(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
   }
   
   @Override // DataNodeMXBean
@@ -2672,7 +2652,7 @@
   @Override // DataNodeMXBean
   public String getRpcPort(){
     InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
-        this.getConf().get("dfs.datanode.ipc.address"));
+        this.getConf().get(DFS_DATANODE_IPC_ADDRESS_KEY));
     return Integer.toString(ipcAddr.getPort());
   }
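
The DataNode.java hunks above consistently replace inline configuration strings with DFSConfigKeys constants. A small sketch of the pattern (not part of the patch), using one of the keys touched above; the helper name is illustrative only:

    // Sketch only, not part of the patch.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    class ConfigKeySketch {
      static boolean useSimulatedStorage(Configuration conf) {
        // before the patch: conf.getBoolean("dfs.datanode.simulateddatastorage", false)
        return conf.getBoolean(
            DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_KEY,
            DFSConfigKeys.DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT);
      }
    }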
 
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 1db7644..784ab94 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -56,7 +56,6 @@
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.util.StringUtils;
 
 /** 
  * Data storage information file.
@@ -153,7 +152,7 @@
       StorageDirectory sd = new StorageDirectory(dataDir);
       StorageState curState;
       try {
-        curState = sd.analyzeStorage(startOpt);
+        curState = sd.analyzeStorage(startOpt, this);
         // sd is locked but not opened
         switch(curState) {
         case NORMAL:
@@ -274,7 +273,7 @@
     this.namespaceID = nsInfo.getNamespaceID();
     this.cTime = 0;
     // store storageID as it currently is
-    sd.write();
+    writeProperties(sd);
   }
 
   /*
@@ -282,7 +281,7 @@
    * DataStorage VERSION file
   */
   @Override
-  protected void setFields(Properties props, 
+  protected void setPropertiesFromFields(Properties props, 
                            StorageDirectory sd 
                            ) throws IOException {
     props.setProperty("storageType", storageType.toString());
@@ -301,7 +300,7 @@
    * DataStorage VERSION file and verify them.
    */
   @Override
-  protected void getFields(Properties props, StorageDirectory sd)
+  protected void setFieldsFromProperties(Properties props, StorageDirectory sd)
       throws IOException {
     setLayoutVersion(props, sd);
     setcTime(props, sd);
@@ -373,7 +372,7 @@
     if (startOpt == StartupOption.ROLLBACK) {
       doRollback(sd, nsInfo); // rollback if applicable
     }
-    sd.read();
+    readProperties(sd);
     checkVersionUpgradable(this.layoutVersion);
     assert this.layoutVersion >= FSConstants.LAYOUT_VERSION :
       "Future version is not allowed";
@@ -448,7 +447,7 @@
     if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       clusterID = nsInfo.getClusterID();
       layoutVersion = nsInfo.getLayoutVersion();
-      sd.write();
+      writeProperties(sd);
       return;
     }
     
@@ -485,7 +484,7 @@
     // 4. Write version file under <SD>/current
     layoutVersion = FSConstants.LAYOUT_VERSION;
     clusterID = nsInfo.getClusterID();
-    sd.write();
+    writeProperties(sd);
     
     // 5. Rename <SD>/previous.tmp to <SD>/previous
     rename(tmpDir, prevDir);
@@ -539,14 +538,13 @@
     if (!prevDir.exists())
       return;
     DataStorage prevInfo = new DataStorage();
-    StorageDirectory prevSD = prevInfo.new StorageDirectory(sd.getRoot());
-    prevSD.read(prevSD.getPreviousVersionFile());
+    prevInfo.readPreviousVersionProperties(sd);
 
     // We allow rollback to a state, which is either consistent with
     // the namespace state or can be further upgraded to it.
     if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION
           && prevInfo.getCTime() <= nsInfo.getCTime()))  // cannot rollback
-      throw new InconsistentFSStateException(prevSD.getRoot(),
+      throw new InconsistentFSStateException(sd.getRoot(),
           "Cannot rollback to a newer state.\nDatanode previous state: LV = "
               + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime()
               + " is newer than the namespace state: LV = "
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 9533fff..d7c0a93 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -54,7 +54,6 @@
 @InterfaceAudience.Private
 public class DirectoryScanner implements Runnable {
   private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
-  private static final int DEFAULT_SCAN_INTERVAL = 21600;
 
   private final DataNode datanode;
   private final FSDataset dataset;
@@ -225,7 +224,7 @@
     this.datanode = dn;
     this.dataset = dataset;
     int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
-        DEFAULT_SCAN_INTERVAL);
+        DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
     scanPeriodMsecs = interval * 1000L; //msec
     int threads = 
         conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
index b409878..eca31fe 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
@@ -107,7 +107,7 @@
       StorageDirectory sd = it.next();
       StorageState curState;
       try {
-        curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR);
+        curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage);
         // sd is locked but not opened
         switch(curState) {
         case NON_EXISTENT:
@@ -126,7 +126,8 @@
           sd.doRecover(curState);
         }
         if(curState != StorageState.NOT_FORMATTED) {
-          sd.read(); // read and verify consistency with other directories
+          // read and verify consistency with other directories
+          storage.readProperties(sd);
         }
       } catch(IOException ioe) {
         sd.unlock();
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
index cca7dfc..5dd8239 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.ArrayList;
 import java.util.Arrays;
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -27,7 +26,6 @@
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 
@@ -45,26 +43,9 @@
   private JournalProtocol backupNode;        // RPC proxy to backup node
   private NamenodeRegistration bnRegistration;  // backup node registration
   private NamenodeRegistration nnRegistration;  // active node registration
-  private ArrayList<JournalRecord> bufCurrent;  // current buffer for writing
-  private ArrayList<JournalRecord> bufReady;    // buffer ready for flushing
+  private EditsDoubleBuffer doubleBuf;
   private DataOutputBuffer out;     // serialized output sent to backup node
 
-  static class JournalRecord {
-    byte op;
-    long txid;
-    Writable[] args;
-
-    JournalRecord(byte op, long txid, Writable ... writables) {
-      this.op = op;
-      this.txid = txid;
-      this.args = writables;
-    }
-
-    void write(DataOutputBuffer out) throws IOException {
-      writeChecksummedOp(out, op, txid, args);
-    }
-  }
-
   EditLogBackupOutputStream(NamenodeRegistration bnReg, // backup node
                             NamenodeRegistration nnReg) // active name-node
   throws IOException {
@@ -82,8 +63,7 @@
       Storage.LOG.error("Error connecting to: " + bnAddress, e);
       throw e;
     }
-    this.bufCurrent = new ArrayList<JournalRecord>();
-    this.bufReady = new ArrayList<JournalRecord>();
+    this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
     this.out = new DataOutputBuffer(DEFAULT_BUFFER_SIZE);
   }
   
@@ -97,14 +77,14 @@
     return JournalType.BACKUP;
   }
 
-  @Override
-  void write(byte[] data, int i, int length) throws IOException {
-    throw new IOException("Not implemented");
-  }
-
   @Override // EditLogOutputStream
-  void write(byte op, long txid, Writable ... writables) throws IOException {
-    bufCurrent.add(new JournalRecord(op, txid, writables));
+  void write(FSEditLogOp op) throws IOException {
+    doubleBuf.writeOp(op);
+  }
+
+  @Override
+  void writeRaw(byte[] bytes, int offset, int length) throws IOException {
+    throw new IOException("Not supported");
   }
 
   /**
@@ -112,51 +92,53 @@
    */
   @Override // EditLogOutputStream
   void create() throws IOException {
-    bufCurrent.clear();
-    assert bufReady.size() == 0 : "previous data is not flushed yet";
+    assert doubleBuf.isFlushed() : "previous data is not flushed yet";
+    this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
   }
 
   @Override // EditLogOutputStream
   public void close() throws IOException {
     // close should have been called after all pending transactions 
     // have been flushed & synced.
-    int size = bufCurrent.size();
+    int size = doubleBuf.countBufferedBytes();
     if (size != 0) {
       throw new IOException("BackupEditStream has " + size +
                           " records still to be flushed and cannot be closed.");
     } 
     RPC.stopProxy(backupNode); // stop the RPC threads
-    bufCurrent = bufReady = null;
+    doubleBuf.close();
+    doubleBuf = null;
   }
 
   @Override
   public void abort() throws IOException {
     RPC.stopProxy(backupNode);
-    bufCurrent = bufReady = null;
+    doubleBuf = null;
   }
 
   @Override // EditLogOutputStream
   void setReadyToFlush() throws IOException {
-    assert bufReady.size() == 0 : "previous data is not flushed yet";
-    ArrayList<JournalRecord>  tmp = bufReady;
-    bufReady = bufCurrent;
-    bufCurrent = tmp;
+    doubleBuf.setReadyToFlush();
   }
 
   @Override // EditLogOutputStream
   protected void flushAndSync() throws IOException {
-    assert out.size() == 0 : "Output buffer is not empty";
-    for (JournalRecord jRec : bufReady) {
-      jRec.write(out);
-    }
-    if (out.size() > 0) {
+    assert out.getLength() == 0 : "Output buffer is not empty";
+    
+    int numReadyTxns = doubleBuf.countReadyTxns();
+    long firstTxToFlush = doubleBuf.getFirstReadyTxId();
+    
+    doubleBuf.flushTo(out);
+    if (out.getLength() > 0) {
+      assert numReadyTxns > 0;
+      
       byte[] data = Arrays.copyOf(out.getData(), out.getLength());
+      out.reset();
+      assert out.getLength() == 0 : "Output buffer is not empty";
+
       backupNode.journal(nnRegistration,
-          bufReady.get(0).txid, bufReady.size(),
-          data);
+          firstTxToFlush, numReadyTxns, data);
     }
-    bufReady.clear();         // erase all data in the buffer
-    out.reset();              // reset buffer to the start position
   }
 
   /**
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
index b4231e7..f79f442 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
@@ -28,9 +28,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Writable;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -46,10 +44,7 @@
   private File file;
   private FileOutputStream fp; // file stream for storing edit logs
   private FileChannel fc; // channel of the file stream for sync
-  private DataOutputBuffer bufCurrent; // current buffer for writing
-  private DataOutputBuffer bufReady; // buffer ready for flushing
-  final private int initBufferSize; // inital buffer size
-
+  private EditsDoubleBuffer doubleBuf;
   static ByteBuffer fill = ByteBuffer.allocateDirect(1024 * 1024); // preallocation, 1MB
 
   static {
@@ -71,9 +66,7 @@
   EditLogFileOutputStream(File name, int size) throws IOException {
     super();
     file = name;
-    initBufferSize = size;
-    bufCurrent = new DataOutputBuffer(size);
-    bufReady = new DataOutputBuffer(size);
+    doubleBuf = new EditsDoubleBuffer(size);
     RandomAccessFile rp = new RandomAccessFile(name, "rw");
     fp = new FileOutputStream(rp.getFD()); // open for append
     fc = rp.getChannel();
@@ -90,15 +83,10 @@
     return JournalType.FILE;
   }
 
-  /**
-   * Write a single byte to the output stream.
-   * @param b the byte to write
-   */
-  private void write(int b) throws IOException {
-    if (fp == null) {
-      throw new IOException("Trying to use aborted output stream");
-    }
-    bufCurrent.write(b);
+  /** {@inheritDoc} */
+  @Override
+  void write(FSEditLogOp op) throws IOException {
+    doubleBuf.writeOp(op);
   }
 
   /**
@@ -110,16 +98,8 @@
    * </ul>
    * */
   @Override
-  void write(byte op, long txid, Writable... writables) throws IOException {
-    if (fp == null) {
-      throw new IOException("Trying to use aborted output stream");
-    }
-    writeChecksummedOp(bufCurrent, op, txid, writables);
-  }
-
-  @Override
-  void write(final byte[] data, int off, int len) throws IOException {
-    bufCurrent.write(data, off, len);
+  void writeRaw(byte[] bytes, int offset, int length) throws IOException {
+    doubleBuf.writeRaw(bytes, offset, length);
   }
 
   /**
@@ -129,7 +109,7 @@
   void create() throws IOException {
     fc.truncate(0);
     fc.position(0);
-    bufCurrent.writeInt(FSConstants.LAYOUT_VERSION);
+    doubleBuf.getCurrentBuf().writeInt(FSConstants.LAYOUT_VERSION);
     setReadyToFlush();
     flush();
   }
@@ -144,22 +124,11 @@
       // close should have been called after all pending transactions
       // have been flushed & synced.
       // if already closed, just skip
-      if(bufCurrent != null)
-      {
-        int bufSize = bufCurrent.size();
-        if (bufSize != 0) {
-          throw new IOException("FSEditStream has " + bufSize
-              + " bytes still to be flushed and cannot " + "be closed.");
-        }
-        bufCurrent.close();
-        bufCurrent = null;
+      if (doubleBuf != null) {
+        doubleBuf.close();
+        doubleBuf = null;
       }
-  
-      if(bufReady != null) {
-        bufReady.close();
-        bufReady = null;
-      }
-  
+      
       // remove the last INVALID marker from transaction log.
       if (fc != null && fc.isOpen()) {
         fc.truncate(fc.position());
@@ -171,8 +140,8 @@
         fp = null;
       }
     } finally {
-      IOUtils.cleanup(FSNamesystem.LOG, bufCurrent, bufReady, fc, fp);
-      bufCurrent = bufReady = null;
+      IOUtils.cleanup(FSNamesystem.LOG, fc, fp);
+      doubleBuf = null;
       fc = null;
       fp = null;
     }
@@ -194,11 +163,8 @@
    */
   @Override
   void setReadyToFlush() throws IOException {
-    assert bufReady.size() == 0 : "previous data is not flushed yet";
-    write(FSEditLogOpCodes.OP_INVALID.getOpCode()); // insert eof marker
-    DataOutputBuffer tmp = bufReady;
-    bufReady = bufCurrent;
-    bufCurrent = tmp;
+    doubleBuf.getCurrentBuf().write(FSEditLogOpCodes.OP_INVALID.getOpCode()); // insert eof marker
+    doubleBuf.setReadyToFlush();
   }
 
   /**
@@ -212,8 +178,7 @@
     }
     
     preallocate(); // preallocate file if necessary
-    bufReady.writeTo(fp); // write data to file
-    bufReady.reset(); // erase all data in the buffer
+    doubleBuf.flushTo(fp);
     fc.force(false); // metadata updates not needed because of preallocation
     fc.position(fc.position() - 1); // skip back the end-of-file marker
   }
@@ -223,7 +188,7 @@
    */
   @Override
   public boolean shouldForceSync() {
-    return bufReady.size() >= initBufferSize;
+    return doubleBuf.shouldForceSync();
   }
   
   /**
@@ -232,8 +197,8 @@
   @Override
   long length() throws IOException {
     // file size - header size + size of both buffers
-    return fc.size() - EDITS_FILE_HEADER_SIZE_BYTES + bufReady.size()
-        + bufCurrent.size();
+    return fc.size() - EDITS_FILE_HEADER_SIZE_BYTES + 
+      doubleBuf.countBufferedBytes();
   }
 
   // allocate a big chunk of data
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
index 9e59f1f..8577db8 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
@@ -39,26 +39,27 @@
   }
 
   /**
-   * Write edits log record into the stream.
-   * The record is represented by operation name and
-   * an array of Writable arguments.
+   * Write an edit log operation to the stream.
    * 
    * @param op operation
-   * @param txid the transaction ID of this operation
-   * @param writables array of Writable arguments
    * @throws IOException
    */
-  abstract void write(byte op, long txid, Writable ... writables)
-  throws IOException;
-  
+  abstract void write(FSEditLogOp op) throws IOException;
+
   /**
    * Write raw data to an edit log. This data should already have
    * the transaction ID, checksum, etc included. It is for use
    * within the BackupNode when replicating edits from the
    * NameNode.
+   *
+   * @param bytes the bytes to write.
+   * @param offset offset in the bytes to write from
+   * @param length number of bytes to write
+   * @throws IOException
    */
-  abstract void write(byte[] data, int offset, int length) throws IOException;
-  
+  abstract void writeRaw(byte[] bytes, int offset, int length)
+      throws IOException;
+
   /**
    * Create and initialize underlying persistent edits log storage.
    * 
@@ -139,26 +140,4 @@
   public String toString() {
     return getName();
   }
-
-  /**
-   * Write the given operation to the specified buffer, including
-   * the transaction ID and checksum.
-   */
-  protected static void writeChecksummedOp(
-      DataOutputBuffer buf, byte op, long txid, Writable... writables)
-      throws IOException {
-    int start = buf.getLength();
-    buf.write(op);
-    buf.writeLong(txid);
-    for (Writable w : writables) {
-      w.write(buf);
-    }
-    // write transaction checksum
-    int end = buf.getLength();
-    Checksum checksum = FSEditLog.getChecksum();
-    checksum.reset();
-    checksum.update(buf.getData(), start, end-start);
-    int sum = (int)checksum.getValue();
-    buf.writeInt(sum);
-  }
 }
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
new file mode 100644
index 0000000..0dd9058
--- /dev/null
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.IOUtils;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * A double-buffer for edits. New edits are written into the first buffer
+ * while the second is available to be flushed. Each time the double-buffer
+ * is flushed, the two internal buffers are swapped. This allows edits
+ * to progress concurrently to flushes without allocating new buffers each
+ * time.
+ */
+class EditsDoubleBuffer {
+
+  private TxnBuffer bufCurrent; // current buffer for writing
+  private TxnBuffer bufReady; // buffer ready for flushing
+  private final int initBufferSize;
+
+  public EditsDoubleBuffer(int defaultBufferSize) {
+    initBufferSize = defaultBufferSize;
+    bufCurrent = new TxnBuffer(initBufferSize);
+    bufReady = new TxnBuffer(initBufferSize);
+
+  }
+    
+  public void writeOp(FSEditLogOp op) throws IOException {
+    bufCurrent.writeOp(op);
+  }
+
+  void writeRaw(byte[] bytes, int offset, int length) throws IOException {
+    bufCurrent.write(bytes, offset, length);
+  }
+  
+  void close() throws IOException {
+    Preconditions.checkNotNull(bufCurrent);
+    Preconditions.checkNotNull(bufReady);
+
+    int bufSize = bufCurrent.size();
+    if (bufSize != 0) {
+      throw new IOException("FSEditStream has " + bufSize
+          + " bytes still to be flushed and cannot be closed.");
+    }
+
+    IOUtils.cleanup(null, bufCurrent, bufReady);
+    bufCurrent = bufReady = null;
+  }
+  
+  void setReadyToFlush() {
+    assert isFlushed() : "previous data not flushed yet";
+    TxnBuffer tmp = bufReady;
+    bufReady = bufCurrent;
+    bufCurrent = tmp;
+  }
+  
+  /**
+   * Writes the content of the "ready" buffer to the given output stream,
+   * and resets it. Does not swap any buffers.
+   */
+  void flushTo(OutputStream out) throws IOException {
+    bufReady.writeTo(out); // write data to file
+    bufReady.reset(); // erase all data in the buffer
+  }
+  
+  boolean shouldForceSync() {
+    return bufReady.size() >= initBufferSize;
+  }
+
+  DataOutputBuffer getCurrentBuf() {
+    return bufCurrent;
+  }
+
+  public boolean isFlushed() {
+    return bufReady.size() == 0;
+  }
+
+  public int countBufferedBytes() {
+    return bufReady.size() + bufCurrent.size();
+  }
+
+  /**
+   * @return the transaction ID of the first transaction ready to be flushed 
+   */
+  public long getFirstReadyTxId() {
+    assert bufReady.firstTxId > 0;
+    return bufReady.firstTxId;
+  }
+
+  /**
+   * @return the number of transactions that are ready to be flushed
+   */
+  public int countReadyTxns() {
+    return bufReady.numTxns;
+  }
+
+  
+  private static class TxnBuffer extends DataOutputBuffer {
+    long firstTxId;
+    int numTxns;
+    private Writer writer;
+    
+    public TxnBuffer(int initBufferSize) {
+      super(initBufferSize);
+      writer = new FSEditLogOp.Writer(this);
+      reset();
+    }
+
+    public void writeOp(FSEditLogOp op) throws IOException {
+      if (firstTxId == FSConstants.INVALID_TXID) {
+        firstTxId = op.txid;
+      } else {
+        assert op.txid > firstTxId;
+      }
+      writer.writeOp(op);
+      numTxns++;
+    }
+    
+    @Override
+    public DataOutputBuffer reset() {
+      super.reset();
+      firstTxId = FSConstants.INVALID_TXID;
+      numTxns = 0;
+      return this;
+    }
+  }
+
+}
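
The new EditsDoubleBuffer above is driven by the output streams in a write/swap/flush cycle. A minimal usage sketch (not part of the patch), assuming it lives in the same org.apache.hadoop.hdfs.server.namenode package as the buffer and FSEditLogOp, with the op and output stream supplied by the caller:

    // Sketch only, not part of the patch.
    import java.io.IOException;
    import java.io.OutputStream;

    class DoubleBufferUsageSketch {
      static void logAndFlush(EditsDoubleBuffer buf, FSEditLogOp op, OutputStream out)
          throws IOException {
        buf.writeOp(op);        // ops accumulate in the current buffer
        buf.setReadyToFlush();  // swap: current becomes ready, new ops go to the other buffer
        buf.flushTo(out);       // drain the ready buffer without blocking new writes
      }
    }

This mirrors what EditLogFileOutputStream and EditLogBackupOutputStream do in the hunks above, with each stream's own sync or journal step happening after flushTo().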
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index dab304c..bf137ae 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -21,6 +21,7 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.zip.Checksum;
+import java.util.zip.CheckedOutputStream;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -28,7 +29,6 @@
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -40,7 +40,6 @@
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
-import org.apache.hadoop.io.ArrayWritable;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
@@ -52,6 +51,7 @@
 import com.google.common.collect.Lists;
 
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.*;
 
 /**
  * FSEditLog maintains a log of the namespace modifications.
@@ -207,7 +207,7 @@
    * Write an operation to the edit log. Do not sync to persistent
    * store yet.
    */
-  void logEdit(final FSEditLogOpCodes opCode, final Writable ... writables) {
+  void logEdit(final FSEditLogOp op) {
     synchronized (this) {
       assert state != State.CLOSED;
       
@@ -219,12 +219,13 @@
       }
       
       long start = beginTransaction();
+      op.setTransactionId(txid);
 
       mapJournalsAndReportErrors(new JournalClosure() {
         @Override 
         public void apply(JournalAndStream jas) throws IOException {
           if (!jas.isActive()) return;
-          jas.stream.write(opCode.getOpCode(), txid, writables);
+          jas.stream.write(op);
         }
       }, "logging edit");
 
@@ -520,49 +521,45 @@
    * Records the block locations of the last block.
    */
   public void logOpenFile(String path, INodeFileUnderConstruction newNode) {
-
-    DeprecatedUTF8 nameReplicationPair[] = new DeprecatedUTF8[] { 
-      new DeprecatedUTF8(path), 
-      FSEditLog.toLogReplication(newNode.getReplication()),
-      FSEditLog.toLogLong(newNode.getModificationTime()),
-      FSEditLog.toLogLong(newNode.getAccessTime()),
-      FSEditLog.toLogLong(newNode.getPreferredBlockSize())};
-    logEdit(OP_ADD,
-            new ArrayWritable(DeprecatedUTF8.class, nameReplicationPair), 
-            new ArrayWritable(Block.class, newNode.getBlocks()),
-            newNode.getPermissionStatus(),
-            new DeprecatedUTF8(newNode.getClientName()),
-            new DeprecatedUTF8(newNode.getClientMachine()));
+    AddOp op = AddOp.getInstance()
+      .setPath(path)
+      .setReplication(newNode.getReplication())
+      .setModificationTime(newNode.getModificationTime())
+      .setAccessTime(newNode.getAccessTime())
+      .setBlockSize(newNode.getPreferredBlockSize())
+      .setBlocks(newNode.getBlocks())
+      .setPermissionStatus(newNode.getPermissionStatus())
+      .setClientName(newNode.getClientName())
+      .setClientMachine(newNode.getClientMachine());
+
+    logEdit(op);
   }
 
   /** 
    * Add close lease record to edit log.
    */
   public void logCloseFile(String path, INodeFile newNode) {
-    DeprecatedUTF8 nameReplicationPair[] = new DeprecatedUTF8[] {
-      new DeprecatedUTF8(path),
-      FSEditLog.toLogReplication(newNode.getReplication()),
-      FSEditLog.toLogLong(newNode.getModificationTime()),
-      FSEditLog.toLogLong(newNode.getAccessTime()),
-      FSEditLog.toLogLong(newNode.getPreferredBlockSize())};
-    logEdit(OP_CLOSE,
-            new ArrayWritable(DeprecatedUTF8.class, nameReplicationPair),
-            new ArrayWritable(Block.class, newNode.getBlocks()),
-            newNode.getPermissionStatus());
+    CloseOp op = CloseOp.getInstance()
+      .setPath(path)
+      .setReplication(newNode.getReplication())
+      .setModificationTime(newNode.getModificationTime())
+      .setAccessTime(newNode.getAccessTime())
+      .setBlockSize(newNode.getPreferredBlockSize())
+      .setBlocks(newNode.getBlocks())
+      .setPermissionStatus(newNode.getPermissionStatus());
+    
+    logEdit(op);
   }
   
   /** 
    * Add create directory record to edit log
    */
   public void logMkDir(String path, INode newNode) {
-    DeprecatedUTF8 info[] = new DeprecatedUTF8[] {
-      new DeprecatedUTF8(path),
-      FSEditLog.toLogLong(newNode.getModificationTime()),
-      FSEditLog.toLogLong(newNode.getAccessTime())
-    };
-    logEdit(OP_MKDIR,
-      new ArrayWritable(DeprecatedUTF8.class, info),
-      newNode.getPermissionStatus());
+    MkdirOp op = MkdirOp.getInstance()
+      .setPath(path)
+      .setTimestamp(newNode.getModificationTime())
+      .setPermissionStatus(newNode.getPermissionStatus());
+    logEdit(op);
   }
   
   /** 
@@ -570,33 +567,33 @@
    * TODO: use String parameters until just before writing to disk
    */
   void logRename(String src, String dst, long timestamp) {
-    DeprecatedUTF8 info[] = new DeprecatedUTF8[] { 
-      new DeprecatedUTF8(src),
-      new DeprecatedUTF8(dst),
-      FSEditLog.toLogLong(timestamp)};
-    logEdit(OP_RENAME_OLD, new ArrayWritable(DeprecatedUTF8.class, info));
+    RenameOldOp op = RenameOldOp.getInstance()
+      .setSource(src)
+      .setDestination(dst)
+      .setTimestamp(timestamp);
+    logEdit(op);
   }
   
   /** 
    * Add rename record to edit log
    */
   void logRename(String src, String dst, long timestamp, Options.Rename... options) {
-    DeprecatedUTF8 info[] = new DeprecatedUTF8[] { 
-      new DeprecatedUTF8(src),
-      new DeprecatedUTF8(dst),
-      FSEditLog.toLogLong(timestamp)};
-    logEdit(OP_RENAME,
-      new ArrayWritable(DeprecatedUTF8.class, info),
-      toBytesWritable(options));
+    RenameOp op = RenameOp.getInstance()
+      .setSource(src)
+      .setDestination(dst)
+      .setTimestamp(timestamp)
+      .setOptions(options);
+    logEdit(op);
   }
   
   /** 
    * Add set replication record to edit log
    */
   void logSetReplication(String src, short replication) {
-    logEdit(OP_SET_REPLICATION, 
-      new DeprecatedUTF8(src), 
-      FSEditLog.toLogReplication(replication));
+    SetReplicationOp op = SetReplicationOp.getInstance()
+      .setPath(src)
+      .setReplication(replication);
+    logEdit(op);
   }
   
   /** Add set namespace quota record to edit log
@@ -605,64 +602,69 @@
    * @param quota the directory size limit
    */
   void logSetQuota(String src, long nsQuota, long dsQuota) {
-    logEdit(OP_SET_QUOTA,
-      new DeprecatedUTF8(src), 
-      new LongWritable(nsQuota), new LongWritable(dsQuota));
+    SetQuotaOp op = SetQuotaOp.getInstance()
+      .setSource(src)
+      .setNSQuota(nsQuota)
+      .setDSQuota(dsQuota);
+    logEdit(op);
   }
 
   /**  Add set permissions record to edit log */
   void logSetPermissions(String src, FsPermission permissions) {
-    logEdit(OP_SET_PERMISSIONS, new DeprecatedUTF8(src), permissions);
+    SetPermissionsOp op = SetPermissionsOp.getInstance()
+      .setSource(src)
+      .setPermissions(permissions);
+    logEdit(op);
   }
 
   /**  Add set owner record to edit log */
   void logSetOwner(String src, String username, String groupname) {
-    DeprecatedUTF8 u = new DeprecatedUTF8(username == null? "": username);
-    DeprecatedUTF8 g = new DeprecatedUTF8(groupname == null? "": groupname);
-    logEdit(OP_SET_OWNER, new DeprecatedUTF8(src), u, g);
+    SetOwnerOp op = SetOwnerOp.getInstance()
+      .setSource(src)
+      .setUser(username)
+      .setGroup(groupname);
+    logEdit(op);
   }
   
   /**
    * concat(trg,src..) log
    */
   void logConcat(String trg, String [] srcs, long timestamp) {
-    int size = 1 + srcs.length + 1; // trg, srcs, timestamp
-    DeprecatedUTF8 info[] = new DeprecatedUTF8[size];
-    int idx = 0;
-    info[idx++] = new DeprecatedUTF8(trg);
-    for(int i=0; i<srcs.length; i++) {
-      info[idx++] = new DeprecatedUTF8(srcs[i]);
-    }
-    info[idx] = FSEditLog.toLogLong(timestamp);
-    logEdit(OP_CONCAT_DELETE, new ArrayWritable(DeprecatedUTF8.class, info));
+    ConcatDeleteOp op = ConcatDeleteOp.getInstance()
+      .setTarget(trg)
+      .setSources(srcs)
+      .setTimestamp(timestamp);
+    logEdit(op);
   }
   
   /** 
    * Add delete file record to edit log
    */
   void logDelete(String src, long timestamp) {
-    DeprecatedUTF8 info[] = new DeprecatedUTF8[] { 
-      new DeprecatedUTF8(src),
-      FSEditLog.toLogLong(timestamp)};
-    logEdit(OP_DELETE, new ArrayWritable(DeprecatedUTF8.class, info));
+    DeleteOp op = DeleteOp.getInstance()
+      .setPath(src)
+      .setTimestamp(timestamp);
+    logEdit(op);
   }
 
   /** 
    * Add generation stamp record to edit log
    */
   void logGenerationStamp(long genstamp) {
-    logEdit(OP_SET_GENSTAMP, new LongWritable(genstamp));
+    SetGenstampOp op = SetGenstampOp.getInstance()
+      .setGenerationStamp(genstamp);
+    logEdit(op);
   }
 
   /** 
    * Add access time record to edit log
    */
   void logTimes(String src, long mtime, long atime) {
-    DeprecatedUTF8 info[] = new DeprecatedUTF8[] { 
-      new DeprecatedUTF8(src),
-      FSEditLog.toLogLong(mtime),
-      FSEditLog.toLogLong(atime)};
-    logEdit(OP_TIMES, new ArrayWritable(DeprecatedUTF8.class, info));
+    TimesOp op = TimesOp.getInstance()
+      .setPath(src)
+      .setModificationTime(mtime)
+      .setAccessTime(atime);
+    logEdit(op);
   }
 
   /** 
@@ -670,14 +672,13 @@
    */
   void logSymlink(String path, String value, long mtime, 
                   long atime, INodeSymlink node) {
-    DeprecatedUTF8 info[] = new DeprecatedUTF8[] { 
-      new DeprecatedUTF8(path),
-      new DeprecatedUTF8(value),
-      FSEditLog.toLogLong(mtime),
-      FSEditLog.toLogLong(atime)};
-    logEdit(OP_SYMLINK, 
-      new ArrayWritable(DeprecatedUTF8.class, info),
-      node.getPermissionStatus());
+    SymlinkOp op = SymlinkOp.getInstance()
+      .setPath(path)
+      .setValue(value)
+      .setModificationTime(mtime)
+      .setAccessTime(atime)
+      .setPermissionStatus(node.getPermissionStatus());
+    logEdit(op);
   }
   
   /**
@@ -688,36 +689,40 @@
    */
   void logGetDelegationToken(DelegationTokenIdentifier id,
       long expiryTime) {
-    logEdit(OP_GET_DELEGATION_TOKEN, id, FSEditLog.toLogLong(expiryTime));
+    GetDelegationTokenOp op = GetDelegationTokenOp.getInstance()
+      .setDelegationTokenIdentifier(id)
+      .setExpiryTime(expiryTime);
+    logEdit(op);
   }
   
   void logRenewDelegationToken(DelegationTokenIdentifier id,
       long expiryTime) {
-    logEdit(OP_RENEW_DELEGATION_TOKEN, id, FSEditLog.toLogLong(expiryTime));
+    RenewDelegationTokenOp op = RenewDelegationTokenOp.getInstance()
+      .setDelegationTokenIdentifier(id)
+      .setExpiryTime(expiryTime);
+    logEdit(op);
   }
   
   void logCancelDelegationToken(DelegationTokenIdentifier id) {
-    logEdit(OP_CANCEL_DELEGATION_TOKEN, id);
+    CancelDelegationTokenOp op = CancelDelegationTokenOp.getInstance()
+      .setDelegationTokenIdentifier(id);
+    logEdit(op);
   }
   
   void logUpdateMasterKey(DelegationKey key) {
-    logEdit(OP_UPDATE_MASTER_KEY, key);
+    UpdateMasterKeyOp op = UpdateMasterKeyOp.getInstance()
+      .setDelegationKey(key);
+    logEdit(op);
   }
 
   void logReassignLease(String leaseHolder, String src, String newHolder) {
-    logEdit(OP_REASSIGN_LEASE, new DeprecatedUTF8(leaseHolder),
-        new DeprecatedUTF8(src),
-        new DeprecatedUTF8(newHolder));
+    ReassignLeaseOp op = ReassignLeaseOp.getInstance()
+      .setLeaseHolder(leaseHolder)
+      .setPath(src)
+      .setNewHolder(newHolder);
+    logEdit(op);
   }
   
-  static private DeprecatedUTF8 toLogReplication(short replication) {
-    return new DeprecatedUTF8(Short.toString(replication));
-  }
-  
-  static private DeprecatedUTF8 toLogLong(long timestamp) {
-    return new DeprecatedUTF8(Long.toString(timestamp));
-  }
-
   /**
    * @return the number of active (non-failed) journals
    */
@@ -818,7 +823,8 @@
     state = State.IN_SEGMENT;
 
     if (writeHeaderTxn) {
-      logEdit(FSEditLogOpCodes.OP_START_LOG_SEGMENT);
+      logEdit(LogSegmentOp.getInstance(
+          FSEditLogOpCodes.OP_START_LOG_SEGMENT));
       logSync();
     }
   }
@@ -833,7 +839,8 @@
         "Bad state: %s", state);
     
     if (writeEndTxn) {
-      logEdit(FSEditLogOpCodes.OP_END_LOG_SEGMENT);
+      logEdit(LogSegmentOp.getInstance(
+          FSEditLogOpCodes.OP_END_LOG_SEGMENT));
       logSync();
     }
 
@@ -992,7 +999,7 @@
       @Override
       public void apply(JournalAndStream jas) throws IOException {
         if (jas.isActive()) {
-          jas.getCurrentStream().write(data, 0, length);
+          jas.getCurrentStream().writeRaw(data, 0, length); // TODO writeRaw
         }
       }      
     }, "Logging edit");
@@ -1000,14 +1007,6 @@
     endTransaction(start);
   }
 
-  static BytesWritable toBytesWritable(Options.Rename... options) {
-    byte[] bytes = new byte[options.length];
-    for (int i = 0; i < options.length; i++) {
-      bytes[i] = options[i].value();
-    }
-    return new BytesWritable(bytes);
-  }
-  
   //// Iteration across journals
   private interface JournalClosure {
     public void apply(JournalAndStream jas) throws IOException;
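
The log* methods above now build typed op objects fluently instead of flattening their arguments into Writable arrays at the call site. A self-contained sketch of that pattern follows, with hypothetical names (DeleteOpSketch, CACHE); the real classes are the FSEditLogOp subclasses shown in the next file.

// Illustrative sketch of the fluent, thread-local op reuse pattern.
class DeleteOpSketch {
  private static final ThreadLocal<DeleteOpSketch> CACHE =
      new ThreadLocal<DeleteOpSketch>() {
        @Override
        protected DeleteOpSketch initialValue() {
          return new DeleteOpSketch();
        }
      };

  String path;
  long timestamp;

  static DeleteOpSketch getInstance() {
    return CACHE.get();               // reuse one mutable instance per thread
  }

  DeleteOpSketch setPath(String path) {
    this.path = path;
    return this;                      // enable method chaining
  }

  DeleteOpSketch setTimestamp(long timestamp) {
    this.timestamp = timestamp;
    return this;
  }
}

A caller then mirrors logDelete() above, e.g. logEdit(DeleteOpSketch.getInstance().setPath(src).setTimestamp(timestamp)); the op carries typed fields until the moment it is serialized by the writer.
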
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index d3f8aff..00ce353 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -40,14 +40,18 @@
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.ArrayWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.hdfs.DeprecatedUTF8;
 
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.DataInputStream;
+import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.EOFException;
 
@@ -63,6 +67,45 @@
   long txid;
 
 
+  @SuppressWarnings("deprecation")
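+  // These op instances are mutable and reused via getInstance(); keeping one
+  // set per thread lets loggers fill in fields without sharing a single map.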
+  private static ThreadLocal<EnumMap<FSEditLogOpCodes, FSEditLogOp>> opInstances =
+    new ThreadLocal<EnumMap<FSEditLogOpCodes, FSEditLogOp>>() {
+      @Override
+      protected EnumMap<FSEditLogOpCodes, FSEditLogOp> initialValue() {
+        EnumMap<FSEditLogOpCodes, FSEditLogOp> instances 
+          = new EnumMap<FSEditLogOpCodes, FSEditLogOp>(FSEditLogOpCodes.class);
+        instances.put(OP_ADD, new AddOp());
+        instances.put(OP_CLOSE, new CloseOp());
+        instances.put(OP_SET_REPLICATION, new SetReplicationOp());
+        instances.put(OP_CONCAT_DELETE, new ConcatDeleteOp());
+        instances.put(OP_RENAME_OLD, new RenameOldOp());
+        instances.put(OP_DELETE, new DeleteOp());
+        instances.put(OP_MKDIR, new MkdirOp());
+        instances.put(OP_SET_GENSTAMP, new SetGenstampOp());
+        instances.put(OP_DATANODE_ADD, new DatanodeAddOp());
+        instances.put(OP_DATANODE_REMOVE, new DatanodeRemoveOp());
+        instances.put(OP_SET_PERMISSIONS, new SetPermissionsOp());
+        instances.put(OP_SET_OWNER, new SetOwnerOp());
+        instances.put(OP_SET_NS_QUOTA, new SetNSQuotaOp());
+        instances.put(OP_CLEAR_NS_QUOTA, new ClearNSQuotaOp());
+        instances.put(OP_SET_QUOTA, new SetQuotaOp());
+        instances.put(OP_TIMES, new TimesOp());
+        instances.put(OP_SYMLINK, new SymlinkOp());
+        instances.put(OP_RENAME, new RenameOp());
+        instances.put(OP_REASSIGN_LEASE, new ReassignLeaseOp());
+        instances.put(OP_GET_DELEGATION_TOKEN, new GetDelegationTokenOp());
+        instances.put(OP_RENEW_DELEGATION_TOKEN, new RenewDelegationTokenOp());
+        instances.put(OP_CANCEL_DELEGATION_TOKEN, 
+                      new CancelDelegationTokenOp());
+        instances.put(OP_UPDATE_MASTER_KEY, new UpdateMasterKeyOp());
+        instances.put(OP_START_LOG_SEGMENT,
+                      new LogSegmentOp(OP_START_LOG_SEGMENT));
+        instances.put(OP_END_LOG_SEGMENT,
+                      new LogSegmentOp(OP_END_LOG_SEGMENT));
+        return instances;
+      }
+  };
+
   /**
    * Constructor for an EditLog Op. EditLog ops cannot be constructed
    * directly, but only through Reader#readOp.
@@ -76,10 +119,14 @@
     this.txid = txid;
   }
 
-  public abstract void readFields(DataInputStream in, int logVersion)
+  abstract void readFields(DataInputStream in, int logVersion)
       throws IOException;
 
-  static class AddCloseOp extends FSEditLogOp {
+  abstract void writeFields(DataOutputStream out)
+      throws IOException;
+
+  @SuppressWarnings("unchecked")
+  static abstract class AddCloseOp extends FSEditLogOp {
     int length;
     String path;
     short replication;
@@ -97,7 +144,71 @@
       assert(opCode == OP_ADD || opCode == OP_CLOSE);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    <T extends AddCloseOp> T setPath(String path) {
+      this.path = path;
+      return (T)this;
+    }
+
+    <T extends AddCloseOp> T setReplication(short replication) {
+      this.replication = replication;
+      return (T)this;
+    }
+
+    <T extends AddCloseOp> T setModificationTime(long mtime) {
+      this.mtime = mtime;
+      return (T)this;
+    }
+
+    <T extends AddCloseOp> T setAccessTime(long atime) {
+      this.atime = atime;
+      return (T)this;
+    }
+
+    <T extends AddCloseOp> T setBlockSize(long blockSize) {
+      this.blockSize = blockSize;
+      return (T)this;
+    }
+
+    <T extends AddCloseOp> T setBlocks(Block[] blocks) {
+      this.blocks = blocks;
+      return (T)this;
+    }
+
+    <T extends AddCloseOp> T setPermissionStatus(PermissionStatus permissions) {
+      this.permissions = permissions;
+      return (T)this;
+    }
+
+    <T extends AddCloseOp> T setClientName(String clientName) {
+      this.clientName = clientName;
+      return (T)this;
+    }
+
+    <T extends AddCloseOp> T setClientMachine(String clientMachine) {
+      this.clientMachine = clientMachine;
+      return (T)this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      DeprecatedUTF8 nameReplicationPair[] = new DeprecatedUTF8[] { 
+        new DeprecatedUTF8(path), 
+        toLogReplication(replication),
+        toLogLong(mtime),
+        toLogLong(atime),
+        toLogLong(blockSize)};
+      new ArrayWritable(DeprecatedUTF8.class, nameReplicationPair).write(out);
+      new ArrayWritable(Block.class, blocks).write(out);
+      permissions.write(out);
+
+      if (this.opCode == OP_ADD) {
+        new DeprecatedUTF8(clientName).write(out);
+        new DeprecatedUTF8(clientMachine).write(out);
+      }
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       // versions > 0 support per file replication
       // get name and replication
@@ -178,6 +289,26 @@
     }
   }
 
+  static class AddOp extends AddCloseOp {
+    private AddOp() {
+      super(OP_ADD);
+    }
+
+    static AddOp getInstance() {
+      return (AddOp)opInstances.get().get(OP_ADD);
+    }
+  }
+
+  static class CloseOp extends AddCloseOp {
+    private CloseOp() {
+      super(OP_CLOSE);
+    }
+
+    static CloseOp getInstance() {
+      return (CloseOp)opInstances.get().get(OP_CLOSE);
+    }
+  }
+
   static class SetReplicationOp extends FSEditLogOp {
     String path;
     short replication;
@@ -186,7 +317,29 @@
       super(OP_SET_REPLICATION);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static SetReplicationOp getInstance() {
+      return (SetReplicationOp)opInstances.get()
+        .get(OP_SET_REPLICATION);
+    }
+
+    SetReplicationOp setPath(String path) {
+      this.path = path;
+      return this;
+    }
+
+    SetReplicationOp setReplication(short replication) {
+      this.replication = replication;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      new DeprecatedUTF8(path).write(out);
+      new DeprecatedUTF8(Short.toString(replication)).write(out);
+    }
+    
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.path = FSImageSerialization.readString(in);
       this.replication = readShort(in);
@@ -203,7 +356,41 @@
       super(OP_CONCAT_DELETE);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static ConcatDeleteOp getInstance() {
+      return (ConcatDeleteOp)opInstances.get()
+        .get(OP_CONCAT_DELETE);
+    }
+
+    ConcatDeleteOp setTarget(String trg) {
+      this.trg = trg;
+      return this;
+    }
+
+    ConcatDeleteOp setSources(String[] srcs) {
+      this.srcs = srcs;
+      return this;
+    }
+
+    ConcatDeleteOp setTimestamp(long timestamp) {
+      this.timestamp = timestamp;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      int size = 1 + srcs.length + 1; // trg, srcs, timestamp
+      DeprecatedUTF8 info[] = new DeprecatedUTF8[size];
+      int idx = 0;
+      info[idx++] = new DeprecatedUTF8(trg);
+      for(int i=0; i<srcs.length; i++) {
+        info[idx++] = new DeprecatedUTF8(srcs[i]);
+      }
+      info[idx] = toLogLong(timestamp);
+      new ArrayWritable(DeprecatedUTF8.class, info).write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.length = in.readInt();
       if (length < 3) { // trg, srcs, timestamp
@@ -230,7 +417,37 @@
       super(OP_RENAME_OLD);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static RenameOldOp getInstance() {
+      return (RenameOldOp)opInstances.get()
+        .get(OP_RENAME_OLD);
+    }
+
+    RenameOldOp setSource(String src) {
+      this.src = src;
+      return this;
+    }
+
+    RenameOldOp setDestination(String dst) {
+      this.dst = dst;
+      return this;
+    }
+
+    RenameOldOp setTimestamp(long timestamp) {
+      this.timestamp = timestamp;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      DeprecatedUTF8 info[] = new DeprecatedUTF8[] { 
+        new DeprecatedUTF8(src),
+        new DeprecatedUTF8(dst),
+        toLogLong(timestamp)};
+      new ArrayWritable(DeprecatedUTF8.class, info).write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.length = in.readInt();
       if (this.length != 3) {
@@ -252,9 +469,32 @@
       super(OP_DELETE);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
-        throws IOException {
+    static DeleteOp getInstance() {
+      return (DeleteOp)opInstances.get()
+        .get(OP_DELETE);
+    }
 
+    DeleteOp setPath(String path) {
+      this.path = path;
+      return this;
+    }
+
+    DeleteOp setTimestamp(long timestamp) {
+      this.timestamp = timestamp;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      DeprecatedUTF8 info[] = new DeprecatedUTF8[] { 
+        new DeprecatedUTF8(path),
+        toLogLong(timestamp)};
+      new ArrayWritable(DeprecatedUTF8.class, info).write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
+        throws IOException {
       this.length = in.readInt();
       if (this.length != 2) {
         throw new IOException("Incorrect data format. "
@@ -274,8 +514,40 @@
     private MkdirOp() {
       super(OP_MKDIR);
     }
+    
+    static MkdirOp getInstance() {
+      return (MkdirOp)opInstances.get()
+        .get(OP_MKDIR);
+    }
 
-    public void readFields(DataInputStream in, int logVersion)
+    MkdirOp setPath(String path) {
+      this.path = path;
+      return this;
+    }
+
+    MkdirOp setTimestamp(long timestamp) {
+      this.timestamp = timestamp;
+      return this;
+    }
+
+    MkdirOp setPermissionStatus(PermissionStatus permissions) {
+      this.permissions = permissions;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      DeprecatedUTF8 info[] = new DeprecatedUTF8[] {
+        new DeprecatedUTF8(path),
+        toLogLong(timestamp), // mtime
+        toLogLong(timestamp) // atime, unused at this time
+      };
+      new ArrayWritable(DeprecatedUTF8.class, info).write(out);
+      permissions.write(out);
+    }
+    
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
 
       this.length = in.readInt();
@@ -309,32 +581,70 @@
       super(OP_SET_GENSTAMP);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static SetGenstampOp getInstance() {
+      return (SetGenstampOp)opInstances.get()
+        .get(OP_SET_GENSTAMP);
+    }
+
+    SetGenstampOp setGenerationStamp(long genStamp) {
+      this.genStamp = genStamp;
+      return this;
+    }
+    
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      new LongWritable(genStamp).write(out);
+    }
+    
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.genStamp = in.readLong();
     }
   }
 
+  @SuppressWarnings("deprecation")
   static class DatanodeAddOp extends FSEditLogOp {
-    @SuppressWarnings("deprecation")
     private DatanodeAddOp() {
       super(OP_DATANODE_ADD);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static DatanodeAddOp getInstance() {
+      return (DatanodeAddOp)opInstances.get()
+        .get(OP_DATANODE_ADD);
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      throw new IOException("Deprecated, should not write");
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       //Datanodes are not persistent any more.
       FSImageSerialization.DatanodeImage.skipOne(in);
     }
   }
 
+  @SuppressWarnings("deprecation")
   static class DatanodeRemoveOp extends FSEditLogOp {
-    @SuppressWarnings("deprecation")
     private DatanodeRemoveOp() {
       super(OP_DATANODE_REMOVE);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static DatanodeRemoveOp getInstance() {
+      return (DatanodeRemoveOp)opInstances.get()
+        .get(OP_DATANODE_REMOVE);
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      throw new IOException("Deprecated, should not write");
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       DatanodeID nodeID = new DatanodeID();
       nodeID.readFields(in);
@@ -350,7 +660,29 @@
       super(OP_SET_PERMISSIONS);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static SetPermissionsOp getInstance() {
+      return (SetPermissionsOp)opInstances.get()
+        .get(OP_SET_PERMISSIONS);
+    }
+
+    SetPermissionsOp setSource(String src) {
+      this.src = src;
+      return this;
+    }
+
+    SetPermissionsOp setPermissions(FsPermission permissions) {
+      this.permissions = permissions;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      new DeprecatedUTF8(src).write(out);
+      permissions.write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.src = FSImageSerialization.readString(in);
       this.permissions = FsPermission.read(in);
@@ -366,13 +698,42 @@
       super(OP_SET_OWNER);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static SetOwnerOp getInstance() {
+      return (SetOwnerOp)opInstances.get()
+        .get(OP_SET_OWNER);
+    }
+
+    SetOwnerOp setSource(String src) {
+      this.src = src;
+      return this;
+    }
+
+    SetOwnerOp setUser(String username) {
+      this.username = username;
+      return this;
+    }
+
+    SetOwnerOp setGroup(String groupname) {
+      this.groupname = groupname;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      DeprecatedUTF8 u = new DeprecatedUTF8(username == null? "": username);
+      DeprecatedUTF8 g = new DeprecatedUTF8(groupname == null? "": groupname);
+      new DeprecatedUTF8(src).write(out);
+      u.write(out);
+      g.write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.src = FSImageSerialization.readString(in);
       this.username = FSImageSerialization.readString_EmptyAsNull(in);
       this.groupname = FSImageSerialization.readString_EmptyAsNull(in);
     }
-
   }
 
   static class SetNSQuotaOp extends FSEditLogOp {
@@ -383,7 +744,18 @@
       super(OP_SET_NS_QUOTA);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static SetNSQuotaOp getInstance() {
+      return (SetNSQuotaOp)opInstances.get()
+        .get(OP_SET_NS_QUOTA);
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      throw new IOException("Deprecated");      
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.src = FSImageSerialization.readString(in);
       this.nsQuota = readLongWritable(in);
@@ -397,7 +769,18 @@
       super(OP_CLEAR_NS_QUOTA);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static ClearNSQuotaOp getInstance() {
+      return (ClearNSQuotaOp)opInstances.get()
+        .get(OP_CLEAR_NS_QUOTA);
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      throw new IOException("Deprecated");      
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.src = FSImageSerialization.readString(in);
     }
@@ -412,7 +795,35 @@
       super(OP_SET_QUOTA);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static SetQuotaOp getInstance() {
+      return (SetQuotaOp)opInstances.get()
+        .get(OP_SET_QUOTA);
+    }
+
+    SetQuotaOp setSource(String src) {
+      this.src = src;
+      return this;
+    }
+
+    SetQuotaOp setNSQuota(long nsQuota) {
+      this.nsQuota = nsQuota;
+      return this;
+    }
+
+    SetQuotaOp setDSQuota(long dsQuota) {
+      this.dsQuota = dsQuota;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      new DeprecatedUTF8(src).write(out);
+      new LongWritable(nsQuota).write(out);
+      new LongWritable(dsQuota).write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.src = FSImageSerialization.readString(in);
       this.nsQuota = readLongWritable(in);
@@ -430,7 +841,37 @@
       super(OP_TIMES);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static TimesOp getInstance() {
+      return (TimesOp)opInstances.get()
+        .get(OP_TIMES);
+    }
+
+    TimesOp setPath(String path) {
+      this.path = path;
+      return this;
+    }
+
+    TimesOp setModificationTime(long mtime) {
+      this.mtime = mtime;
+      return this;
+    }
+
+    TimesOp setAccessTime(long atime) {
+      this.atime = atime;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      DeprecatedUTF8 info[] = new DeprecatedUTF8[] { 
+        new DeprecatedUTF8(path),
+        toLogLong(mtime),
+        toLogLong(atime)};
+      new ArrayWritable(DeprecatedUTF8.class, info).write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.length = in.readInt();
       if (length != 3) {
@@ -455,7 +896,49 @@
       super(OP_SYMLINK);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static SymlinkOp getInstance() {
+      return (SymlinkOp)opInstances.get()
+        .get(OP_SYMLINK);
+    }
+
+    SymlinkOp setPath(String path) {
+      this.path = path;
+      return this;
+    }
+
+    SymlinkOp setValue(String value) {
+      this.value = value;
+      return this;
+    }
+
+    SymlinkOp setModificationTime(long mtime) {
+      this.mtime = mtime;
+      return this;
+    }
+
+    SymlinkOp setAccessTime(long atime) {
+      this.atime = atime;
+      return this;
+    }
+
+    SymlinkOp setPermissionStatus(PermissionStatus permissionStatus) {
+      this.permissionStatus = permissionStatus;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      DeprecatedUTF8 info[] = new DeprecatedUTF8[] { 
+        new DeprecatedUTF8(path),
+        new DeprecatedUTF8(value),
+        toLogLong(mtime),
+        toLogLong(atime)};
+      new ArrayWritable(DeprecatedUTF8.class, info).write(out);
+      permissionStatus.write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
 
       this.length = in.readInt();
@@ -482,7 +965,43 @@
       super(OP_RENAME);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static RenameOp getInstance() {
+      return (RenameOp)opInstances.get()
+        .get(OP_RENAME);
+    }
+
+    RenameOp setSource(String src) {
+      this.src = src;
+      return this;
+    }
+
+    RenameOp setDestination(String dst) {
+      this.dst = dst;
+      return this;
+    }
+    
+    RenameOp setTimestamp(long timestamp) {
+      this.timestamp = timestamp;
+      return this;
+    }
+    
+    RenameOp setOptions(Rename[] options) {
+      this.options = options;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      DeprecatedUTF8 info[] = new DeprecatedUTF8[] { 
+        new DeprecatedUTF8(src),
+        new DeprecatedUTF8(dst),
+        toLogLong(timestamp)};
+      new ArrayWritable(DeprecatedUTF8.class, info).write(out);
+      toBytesWritable(options).write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.length = in.readInt();
       if (this.length != 3) {
@@ -507,6 +1026,14 @@
       }
       return options;
     }
+
+    static BytesWritable toBytesWritable(Rename... options) {
+      byte[] bytes = new byte[options.length];
+      for (int i = 0; i < options.length; i++) {
+        bytes[i] = options[i].value();
+      }
+      return new BytesWritable(bytes);
+    }
   }
 
   static class ReassignLeaseOp extends FSEditLogOp {
@@ -517,8 +1044,36 @@
     private ReassignLeaseOp() {
       super(OP_REASSIGN_LEASE);
     }
-    
-    public void readFields(DataInputStream in, int logVersion)
+
+    static ReassignLeaseOp getInstance() {
+      return (ReassignLeaseOp)opInstances.get()
+        .get(OP_REASSIGN_LEASE);
+    }
+
+    ReassignLeaseOp setLeaseHolder(String leaseHolder) {
+      this.leaseHolder = leaseHolder;
+      return this;
+    }
+
+    ReassignLeaseOp setPath(String path) {
+      this.path = path;
+      return this;
+    }
+
+    ReassignLeaseOp setNewHolder(String newHolder) {
+      this.newHolder = newHolder;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      new DeprecatedUTF8(leaseHolder).write(out);
+      new DeprecatedUTF8(path).write(out);
+      new DeprecatedUTF8(newHolder).write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.leaseHolder = FSImageSerialization.readString(in);
       this.path = FSImageSerialization.readString(in);
@@ -534,7 +1089,30 @@
       super(OP_GET_DELEGATION_TOKEN);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static GetDelegationTokenOp getInstance() {
+      return (GetDelegationTokenOp)opInstances.get()
+        .get(OP_GET_DELEGATION_TOKEN);
+    }
+
+    GetDelegationTokenOp setDelegationTokenIdentifier(
+        DelegationTokenIdentifier token) {
+      this.token = token;
+      return this;
+    }
+
+    GetDelegationTokenOp setExpiryTime(long expiryTime) {
+      this.expiryTime = expiryTime;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      token.write(out);
+      toLogLong(expiryTime).write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.token = new DelegationTokenIdentifier();
       this.token.readFields(in);
@@ -550,7 +1128,30 @@
       super(OP_RENEW_DELEGATION_TOKEN);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static RenewDelegationTokenOp getInstance() {
+      return (RenewDelegationTokenOp)opInstances.get()
+          .get(OP_RENEW_DELEGATION_TOKEN);
+    }
+
+    RenewDelegationTokenOp setDelegationTokenIdentifier(
+        DelegationTokenIdentifier token) {
+      this.token = token;
+      return this;
+    }
+
+    RenewDelegationTokenOp setExpiryTime(long expiryTime) {
+      this.expiryTime = expiryTime;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      token.write(out);
+      toLogLong(expiryTime).write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.token = new DelegationTokenIdentifier();
       this.token.readFields(in);
@@ -565,7 +1166,24 @@
       super(OP_CANCEL_DELEGATION_TOKEN);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static CancelDelegationTokenOp getInstance() {
+      return (CancelDelegationTokenOp)opInstances.get()
+          .get(OP_CANCEL_DELEGATION_TOKEN);
+    }
+
+    CancelDelegationTokenOp setDelegationTokenIdentifier(
+        DelegationTokenIdentifier token) {
+      this.token = token;
+      return this;
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      token.write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.token = new DelegationTokenIdentifier();
       this.token.readFields(in);
@@ -579,7 +1197,23 @@
       super(OP_UPDATE_MASTER_KEY);
     }
 
-    public void readFields(DataInputStream in, int logVersion)
+    static UpdateMasterKeyOp getInstance() {
+      return (UpdateMasterKeyOp)opInstances.get()
+          .get(OP_UPDATE_MASTER_KEY);
+    }
+
+    UpdateMasterKeyOp setDelegationKey(DelegationKey key) {
+      this.key = key;
+      return this;
+    }
+    
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+      key.write(out);
+    }
+
+    @Override
+    void readFields(DataInputStream in, int logVersion)
         throws IOException {
       this.key = new DelegationKey();
       this.key.readFields(in);
@@ -593,10 +1227,39 @@
              code == OP_END_LOG_SEGMENT : "Bad op: " + code;
     }
 
+    static LogSegmentOp getInstance(FSEditLogOpCodes code) {
+      return (LogSegmentOp)opInstances.get().get(code);
+    }
+
     public void readFields(DataInputStream in, int logVersion)
         throws IOException {
       // no data stored in these ops yet
     }
+
+    @Override
+    void writeFields(DataOutputStream out) throws IOException {
+      // no data stored
+    }
+  }
+
+  static class InvalidOp extends FSEditLogOp {
+    private InvalidOp() {
+      super(OP_INVALID);
+    }
+
+    static InvalidOp getInstance() {
+      return (InvalidOp)opInstances.get().get(OP_INVALID);
+    }
+
+    @Override 
+    void writeFields(DataOutputStream out) throws IOException {
+    }
+    
+    @Override
+    void readFields(DataInputStream in, int logVersion)
+        throws IOException {
+      // nothing to read
+    }
   }
 
   static private short readShort(DataInputStream in) throws IOException {
@@ -607,6 +1270,14 @@
     return Long.parseLong(FSImageSerialization.readString(in));
   }
 
+  static private DeprecatedUTF8 toLogReplication(short replication) {
+    return new DeprecatedUTF8(Short.toString(replication));
+  }
+  
+  static private DeprecatedUTF8 toLogLong(long timestamp) {
+    return new DeprecatedUTF8(Long.toString(timestamp));
+  }
+
   /**
    * A class to read in blocks stored in the old format. The only two
    * fields in the block were blockid and length.
@@ -710,13 +1381,43 @@
   }
 
   /**
+   * Class for writing editlog ops
+   */
+  public static class Writer {
+    private final DataOutputBuffer buf;
+
+    public Writer(DataOutputBuffer out) {
+      this.buf = out;
+    }
+
+    /**
+     * Write an operation to the buffer: the opcode byte, the transaction id,
+     * the op-specific fields, and a trailing checksum over those bytes.
+     *
+     * @param op The operation to write
+     * @throws IOException if an error occurs during writing.
+     */
+    public void writeOp(FSEditLogOp op) throws IOException {
+      int start = buf.getLength();
+      buf.writeByte(op.opCode.getOpCode());
+      buf.writeLong(op.txid);
+      op.writeFields(buf);
+      int end = buf.getLength();
+      Checksum checksum = FSEditLog.getChecksum();
+      checksum.reset();
+      checksum.update(buf.getData(), start, end-start);
+      int sum = (int)checksum.getValue();
+      buf.writeInt(sum);
+    }
+  }
+
+  /**
    * Class for reading editlog ops from a stream
    */
   public static class Reader {
     private final DataInputStream in;
     private final int logVersion;
     private final Checksum checksum;
-    private EnumMap<FSEditLogOpCodes, FSEditLogOp> opInstances;
+
     /**
      * Construct the reader
      * @param in The stream to read from.
@@ -734,36 +1435,6 @@
       }
       this.logVersion = logVersion;
       this.checksum = checksum;
-      opInstances = new EnumMap<FSEditLogOpCodes, FSEditLogOp>(
-          FSEditLogOpCodes.class);
-      opInstances.put(OP_ADD, new AddCloseOp(OP_ADD));
-      opInstances.put(OP_CLOSE, new AddCloseOp(OP_CLOSE));
-      opInstances.put(OP_SET_REPLICATION, new SetReplicationOp());
-      opInstances.put(OP_CONCAT_DELETE, new ConcatDeleteOp());
-      opInstances.put(OP_RENAME_OLD, new RenameOldOp());
-      opInstances.put(OP_DELETE, new DeleteOp());
-      opInstances.put(OP_MKDIR, new MkdirOp());
-      opInstances.put(OP_SET_GENSTAMP, new SetGenstampOp());
-      opInstances.put(OP_DATANODE_ADD, new DatanodeAddOp());
-      opInstances.put(OP_DATANODE_REMOVE, new DatanodeRemoveOp());
-      opInstances.put(OP_SET_PERMISSIONS, new SetPermissionsOp());
-      opInstances.put(OP_SET_OWNER, new SetOwnerOp());
-      opInstances.put(OP_SET_NS_QUOTA, new SetNSQuotaOp());
-      opInstances.put(OP_CLEAR_NS_QUOTA, new ClearNSQuotaOp());
-      opInstances.put(OP_SET_QUOTA, new SetQuotaOp());
-      opInstances.put(OP_TIMES, new TimesOp());
-      opInstances.put(OP_SYMLINK, new SymlinkOp());
-      opInstances.put(OP_RENAME, new RenameOp());
-      opInstances.put(OP_REASSIGN_LEASE, new ReassignLeaseOp());
-      opInstances.put(OP_GET_DELEGATION_TOKEN, new GetDelegationTokenOp());
-      opInstances.put(OP_RENEW_DELEGATION_TOKEN, new RenewDelegationTokenOp());
-      opInstances.put(OP_CANCEL_DELEGATION_TOKEN,
-                      new CancelDelegationTokenOp());
-      opInstances.put(OP_UPDATE_MASTER_KEY, new UpdateMasterKeyOp());
-      opInstances.put(OP_START_LOG_SEGMENT,
-                      new LogSegmentOp(OP_START_LOG_SEGMENT));
-      opInstances.put(OP_END_LOG_SEGMENT,
-                      new LogSegmentOp(OP_END_LOG_SEGMENT));
     }
 
     /**
@@ -796,7 +1467,7 @@
         return null;
       }
 
-      FSEditLogOp op = opInstances.get(opCode);
+      FSEditLogOp op = opInstances.get().get(opCode);
       if (op == null) {
         throw new IOException("Read invalid opcode " + opCode);
       }
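
The Writer added above frames every record the same way: the opcode byte, the 8-byte transaction id, the op-specific fields, then a 4-byte checksum computed over those preceding bytes. Below is a standalone sketch of that framing; OpFramingSketch is illustrative, and CRC32 merely stands in for whatever checksum FSEditLog.getChecksum() returns.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;

// Sketch of the record layout produced by the writer above:
// [1-byte opcode][8-byte txid][op fields][4-byte checksum of the preceding bytes]
class OpFramingSketch {
  static byte[] frame(byte opCode, long txid, byte[] opFields) throws IOException {
    ByteArrayOutputStream record = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(record);

    out.writeByte(opCode);
    out.writeLong(txid);
    out.write(opFields);
    out.flush();

    CRC32 checksum = new CRC32();     // stand-in for the configured checksum
    checksum.update(record.toByteArray(), 0, record.size());
    out.writeInt((int) checksum.getValue());
    out.flush();

    return record.toByteArray();
  }
}

A reader can recompute the same checksum over the opcode, txid, and fields and compare it against the trailing word before trusting the record.
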
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 6bccb56..993dd8c 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -295,7 +295,7 @@
       StorageDirectory sd = it.next();
       StorageState curState;
       try {
-        curState = sd.analyzeStorage(startOpt);
+        curState = sd.analyzeStorage(startOpt, storage);
         // sd is locked but not opened
         switch(curState) {
         case NON_EXISTENT:
@@ -311,7 +311,8 @@
         }
         if (curState != StorageState.NOT_FORMATTED 
             && startOpt != StartupOption.ROLLBACK) {
-          sd.read(); // read and verify consistency with other directories
+          // read and verify consistency with other directories
+          storage.readProperties(sd);
           isFormatted = true;
         }
         if (startOpt == StartupOption.IMPORT && isFormatted)
@@ -395,7 +396,7 @@
       try {
         // Write the version file, since saveFsImage above only makes the
         // fsimage_<txid>, and the directory is otherwise empty.
-        sd.write();
+        storage.writeProperties(sd);
         
         File prevDir = sd.getPreviousDir();
         File tmpDir = sd.getPreviousTmp();
@@ -433,14 +434,14 @@
       if (!prevDir.exists()) {  // use current directory then
         LOG.info("Storage directory " + sd.getRoot()
                  + " does not contain previous fs state.");
-        sd.read(); // read and verify consistency with other directories
+        // read and verify consistency with other directories
+        storage.readProperties(sd);
         continue;
       }
-      StorageDirectory sdPrev 
-        = prevState.getStorage().new StorageDirectory(sd.getRoot());
 
       // read and verify consistency of the prev dir
-      sdPrev.read(sdPrev.getPreviousVersionFile());
+      prevState.getStorage().readPreviousVersionProperties(sd);
+
       if (prevState.getLayoutVersion() != FSConstants.LAYOUT_VERSION) {
         throw new IOException(
           "Cannot rollback to storage version " +
@@ -604,7 +605,7 @@
     //
     StorageDirectory sdForProperties =
       loadPlan.getStorageDirectoryForProperties();
-    sdForProperties.read();
+    storage.readProperties(sdForProperties);
     File imageFile = loadPlan.getImageFile();
 
     try {
@@ -730,7 +731,6 @@
     storage.setMostRecentCheckpointTxId(txId);
   }
 
-
   /**
    * Save the contents of the FS image to the file.
    */
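
The FSImage hunks above replace calls on the directory object (sd.read(), sd.write()) with calls on the owning storage object (storage.readProperties(sd), storage.writeProperties(sd)), and drop the getStorage().new StorageDirectory(...) inner-class construction. A minimal sketch of that inversion, with illustrative names (StorageSketch, Dir) rather than the real storage API:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

// Illustrative inversion: the directory object only knows its layout; the
// enclosing storage object reads the properties for a given directory.
class StorageSketch {
  static class Dir {                       // plain static nested class
    final File root;
    Dir(File root) { this.root = root; }
    File versionFile() { return new File(root, "VERSION"); }
  }

  final Properties props = new Properties();

  void readProperties(Dir sd) throws IOException {
    FileInputStream in = new FileInputStream(sd.versionFile());
    try {
      props.load(in);                      // absorb this directory's metadata
    } finally {
      in.close();
    }
  }
}

With the directory reduced to a plain static nested class, it no longer needs a hidden reference to its enclosing instance, which is what the removed .new construction relied on.
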
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 5d62a95..b3332ce 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -164,7 +164,7 @@
 @Metrics(context="dfs")
 public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     FSClusterStats, NameNodeMXBean {
-  public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
+  static final Log LOG = LogFactory.getLog(FSNamesystem.class);
 
   private static final ThreadLocal<StringBuilder> auditBuffer =
     new ThreadLocal<StringBuilder>() {
@@ -239,35 +239,9 @@
   
   // Block pool ID used by this namenode
   String blockPoolId;
-    
-  /**
-   * Stores the datanode -> block map.  
-   * <p>
-   * Done by storing a set of {@link DatanodeDescriptor} objects, sorted by 
-   * storage id. In order to keep the storage map consistent it tracks 
-   * all storages ever registered with the namenode.
-   * A descriptor corresponding to a specific storage id can be
-   * <ul> 
-   * <li>added to the map if it is a new storage id;</li>
-   * <li>updated with a new datanode started as a replacement for the old one 
-   * with the same storage id; and </li>
-   * <li>removed if and only if an existing datanode is restarted to serve a
-   * different storage id.</li>
-   * </ul> <br>
-   * The list of the {@link DatanodeDescriptor}s in the map is checkpointed
-   * in the namespace image file. Only the {@link DatanodeInfo} part is 
-   * persistent, the list of blocks is restored from the datanode block
-   * reports. 
-   * <p>
-   * Mapping: StorageID -> DatanodeDescriptor
-   */
-  public final NavigableMap<String, DatanodeDescriptor> datanodeMap = 
-    new TreeMap<String, DatanodeDescriptor>();
 
   /**
-   * Stores a set of DatanodeDescriptor objects.
-   * This is a subset of {@link #datanodeMap}, containing nodes that are 
-   * considered alive.
+   * Stores a subset of datanodeMap, containing nodes that are considered alive.
    * The HeartbeatMonitor periodically checks for out-dated entries,
    * and removes them from the list.
    */
@@ -291,9 +265,6 @@
 
   // heartbeatRecheckInterval is how often namenode checks for expired datanodes
   private long heartbeatRecheckInterval;
-  // heartbeatExpireInterval is how long namenode waits for datanode to report
-  // heartbeat
-  private long heartbeatExpireInterval;
 
   //resourceRecheckInterval is how often namenode checks for the disk space availability
   private long resourceRecheckInterval;
@@ -316,9 +287,6 @@
    */
   private final GenerationStamp generationStamp = new GenerationStamp();
 
-  // Ask Datanode only up to this many blocks to delete.
-  public int blockInvalidateLimit = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
-
   // precision of access times.
   private long accessTimePrecision = 0;
 
@@ -515,14 +483,9 @@
     this.defaultPermission = PermissionStatus.createImmutable(
         fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission));
 
-    long heartbeatInterval = conf.getLong(
-        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
-        DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000;
     this.heartbeatRecheckInterval = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
         DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
-    this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval +
-      10 * heartbeatInterval;
     
     this.serverDefaults = new FsServerDefaults(
         conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE),
@@ -533,14 +496,6 @@
     this.maxFsObjects = conf.getLong(DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY, 
                                      DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
 
-    //default limit
-    this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit, 
-                                         20*(int)(heartbeatInterval/1000));
-    //use conf value if it is set.
-    this.blockInvalidateLimit = conf.getInt(
-        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, this.blockInvalidateLimit);
-    LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY + "=" + this.blockInvalidateLimit);
-
     this.accessTimePrecision = conf.getLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 0);
     this.supportAppends = conf.getBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY,
                                       DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT);
@@ -644,12 +599,7 @@
       out.println("Live Datanodes: "+live.size());
       out.println("Dead Datanodes: "+dead.size());
       blockManager.metaSave(out);
-  
-      //
-      // Dump all datanodes
-      //
-      datanodeDump(out);
-  
+
       out.flush();
       out.close();
     } finally {
@@ -690,45 +640,7 @@
     readLock();
     try {
       checkSuperuserPrivilege();
-  
-      DatanodeDescriptor node = getDatanode(datanode);
-      if (node == null) {
-        NameNode.stateChangeLog.warn("BLOCK* NameSystem.getBlocks: "
-            + "Asking for blocks from an unrecorded node " + datanode.getName());
-        throw new IllegalArgumentException(
-            "Unexpected exception.  Got getBlocks message for datanode " +
-            datanode.getName() + ", but there is no info for it");
-      }
-  
-      int numBlocks = node.numBlocks();
-      if(numBlocks == 0) {
-        return new BlocksWithLocations(new BlockWithLocations[0]);
-      }
-      Iterator<BlockInfo> iter = node.getBlockIterator();
-      int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
-      // skip blocks
-      for(int i=0; i<startBlock; i++) {
-        iter.next();
-      }
-      List<BlockWithLocations> results = new ArrayList<BlockWithLocations>();
-      long totalSize = 0;
-      BlockInfo curBlock;
-      while(totalSize<size && iter.hasNext()) {
-        curBlock = iter.next();
-        if(!curBlock.isComplete())  continue;
-        totalSize += addBlock(curBlock, results);
-      }
-      if(totalSize<size) {
-        iter = node.getBlockIterator(); // start from the beginning
-        for(int i=0; i<startBlock&&totalSize<size; i++) {
-          curBlock = iter.next();
-          if(!curBlock.isComplete())  continue;
-          totalSize += addBlock(curBlock, results);
-        }
-      }
-  
-      return new BlocksWithLocations(
-          results.toArray(new BlockWithLocations[results.size()]));
+      return blockManager.getBlocksWithLocations(datanode, size);  
     } finally {
       readUnlock();
     }
@@ -744,22 +656,6 @@
         : ExportedBlockKeys.DUMMY_KEYS;
   }
 
-  /**
-   * Get all valid locations of the block & add the block to results
-   * return the length of the added block; 0 if the block is not added
-   */
-  private long addBlock(Block block, List<BlockWithLocations> results) {
-    assert hasReadOrWriteLock();
-    ArrayList<String> machineSet = blockManager.getValidLocations(block);
-    if(machineSet.size() == 0) {
-      return 0;
-    } else {
-      results.add(new BlockWithLocations(block, 
-          machineSet.toArray(new String[machineSet.size()])));
-      return block.getNumBytes();
-    }
-  }
-
   /////////////////////////////////////////////////////////
   //
   // These methods are called by HadoopFS clients
@@ -1797,7 +1693,8 @@
       //find datanode descriptors
       chosen = new ArrayList<DatanodeDescriptor>();
       for(DatanodeInfo d : existings) {
-        final DatanodeDescriptor descriptor = getDatanode(d);
+        final DatanodeDescriptor descriptor = blockManager.getDatanodeManager(
+            ).getDatanode(d);
         if (descriptor != null) {
           chosen.add(descriptor);
         }
@@ -2624,7 +2521,8 @@
         if (newtargets.length > 0) {
           descriptors = new DatanodeDescriptor[newtargets.length];
           for(int i = 0; i < newtargets.length; i++) {
-            descriptors[i] = getDatanode(newtargets[i]);
+            descriptors[i] = blockManager.getDatanodeManager().getDatanode(
+                newtargets[i]);
           }
         }
         if (closeFile) {
@@ -2768,15 +2666,6 @@
     return Storage.getRegistrationID(dir.fsImage.getStorage());
   }
 
-  public boolean isDatanodeDead(DatanodeDescriptor node) {
-    return (node.getLastUpdate() <
-            (now() - heartbeatExpireInterval));
-  }
-    
-  private void setDatanodeDead(DatanodeDescriptor node) throws IOException {
-    node.setLastUpdate(0);
-  }
-
   /**
    * The given node has reported in.  This method should:
    * 1) Record the heartbeat, so the datanode isn't timed out
@@ -2794,91 +2683,32 @@
         throws IOException {
     readLock();
     try {
-      return handleHeartbeatInternal(nodeReg, capacity, dfsUsed, 
-          remaining, blockPoolUsed, xceiverCount, xmitsInProgress, 
-          failedVolumes);
+      final int maxTransfer = blockManager.maxReplicationStreams - xmitsInProgress;
+      DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
+          nodeReg, blockPoolId, capacity, dfsUsed, remaining, blockPoolUsed,
+          xceiverCount, maxTransfer, failedVolumes);
+      if (cmds != null) {
+        return cmds;
+      }
+
+      //check distributed upgrade
+      DatanodeCommand cmd = getDistributedUpgradeCommand();
+      if (cmd != null) {
+        return new DatanodeCommand[] {cmd};
+      }
+      return null;
     } finally {
       readUnlock();
     }
   }
 
-  /** @see #handleHeartbeat(DatanodeRegistration, long, long, long, long, int, int, int) */
-  DatanodeCommand[] handleHeartbeatInternal(DatanodeRegistration nodeReg,
-      long capacity, long dfsUsed, long remaining, long blockPoolUsed,
-      int xceiverCount, int xmitsInProgress, int failedVolumes) 
-        throws IOException {
-    assert hasReadLock();
-    DatanodeCommand cmd = null;
-    synchronized (heartbeats) {
-      synchronized (datanodeMap) {
-        DatanodeDescriptor nodeinfo = null;
-        try {
-          nodeinfo = getDatanode(nodeReg);
-        } catch(UnregisteredNodeException e) {
-          return new DatanodeCommand[]{DatanodeCommand.REGISTER};
-        }
-        
-        // Check if this datanode should actually be shutdown instead. 
-        if (nodeinfo != null && nodeinfo.isDisallowed()) {
-          setDatanodeDead(nodeinfo);
-          throw new DisallowedDatanodeException(nodeinfo);
-        }
-         
-        if (nodeinfo == null || !nodeinfo.isAlive) {
-          return new DatanodeCommand[]{DatanodeCommand.REGISTER};
-        }
-
-        updateStats(nodeinfo, false);
-        nodeinfo.updateHeartbeat(capacity, dfsUsed, remaining, blockPoolUsed,
-            xceiverCount, failedVolumes);
-        updateStats(nodeinfo, true);
-        
-        //check lease recovery
-        BlockInfoUnderConstruction[] blocks = nodeinfo
-            .getLeaseRecoveryCommand(Integer.MAX_VALUE);
-        if (blocks != null) {
-          BlockRecoveryCommand brCommand = new BlockRecoveryCommand(
-              blocks.length);
-          for (BlockInfoUnderConstruction b : blocks) {
-            brCommand.add(new RecoveringBlock(
-                new ExtendedBlock(blockPoolId, b), b.getExpectedLocations(), b
-                    .getBlockRecoveryId()));
-          }
-          return new DatanodeCommand[] { brCommand };
-        }
-      
-        ArrayList<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>(3);
-        //check pending replication
-        List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
-              blockManager.maxReplicationStreams - xmitsInProgress);
-        if (pendingList != null) {
-          cmd = new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
-              pendingList);
-          cmds.add(cmd);
-        }
-        //check block invalidation
-        Block[] blks = nodeinfo.getInvalidateBlocks(blockInvalidateLimit);
-        if (blks != null) {
-          cmd = new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, blockPoolId, blks);
-          cmds.add(cmd);
-        }
-        // check access key update
-        if (isBlockTokenEnabled && nodeinfo.needKeyUpdate) {
-          cmds.add(new KeyUpdateCommand(blockTokenSecretManager.exportKeys()));
-          nodeinfo.needKeyUpdate = false;
-        }
-        if (!cmds.isEmpty()) {
-          return cmds.toArray(new DatanodeCommand[cmds.size()]);
-        }
-      }
+  public void addKeyUpdateCommand(final List<DatanodeCommand> cmds,
+      final DatanodeDescriptor nodeinfo) {
+    // check access key update
+    if (isBlockTokenEnabled && nodeinfo.needKeyUpdate) {
+      cmds.add(new KeyUpdateCommand(blockTokenSecretManager.exportKeys()));
+      nodeinfo.needKeyUpdate = false;
     }
-
-    //check distributed upgrade
-    cmd = getDistributedUpgradeCommand();
-    if (cmd != null) {
-      return new DatanodeCommand[] {cmd};
-    }
-    return null;
   }
 
   public void updateStats(DatanodeDescriptor node, boolean isAdded) {
@@ -3019,7 +2849,8 @@
       ) throws UnregisteredNodeException {
     writeLock();
     try {
-      DatanodeDescriptor nodeInfo = getDatanode(nodeID);
+      DatanodeDescriptor nodeInfo = getBlockManager().getDatanodeManager(
+          ).getDatanode(nodeID);
       if (nodeInfo != null) {
         removeDatanode(nodeInfo);
       } else {
@@ -3035,7 +2866,7 @@
    * Remove a datanode descriptor.
    * @param nodeInfo datanode descriptor.
    */
-  private void removeDatanode(DatanodeDescriptor nodeInfo) {
+  public void removeDatanode(DatanodeDescriptor nodeInfo) {
     assert hasWriteLock();
     synchronized (heartbeats) {
       if (nodeInfo.isAlive) {
@@ -3066,6 +2897,7 @@
    * effect causes more datanodes to be declared dead.
    */
   void heartbeatCheck() {
+    final DatanodeManager datanodeManager = getBlockManager().getDatanodeManager();
     // It's OK to check safe mode w/o taking the lock here, we re-check
     // for safe mode after taking the lock before removing a datanode.
     if (isInSafeMode()) {
@@ -3081,7 +2913,7 @@
         for (Iterator<DatanodeDescriptor> it = heartbeats.iterator();
              it.hasNext();) {
           DatanodeDescriptor nodeInfo = it.next();
-          if (isDatanodeDead(nodeInfo)) {
+          if (datanodeManager.isDatanodeDead(nodeInfo)) {
             expiredHeartbeats.incr();
             foundDead = true;
             nodeID = nodeInfo;
@@ -3097,21 +2929,7 @@
           return;
         }
         try {
-          synchronized(heartbeats) {
-            synchronized (datanodeMap) {
-              DatanodeDescriptor nodeInfo = null;
-              try {
-                nodeInfo = getDatanode(nodeID);
-              } catch (IOException e) {
-                nodeInfo = null;
-              }
-              if (nodeInfo != null && isDatanodeDead(nodeInfo)) {
-                NameNode.stateChangeLog.info("BLOCK* NameSystem.heartbeatCheck: "
-                                             + "lost heartbeat from " + nodeInfo.getName());
-                removeDatanode(nodeInfo);
-              }
-            }
-          }
+          datanodeManager.removeDeadDatanode(nodeID);
         } finally {
           writeUnlock();
         }
@@ -3131,7 +2949,8 @@
     writeLock();
     startTime = now(); //after acquiring write lock
     try {
-      DatanodeDescriptor node = getDatanode(nodeID);
+      final DatanodeDescriptor node = blockManager.getDatanodeManager(
+          ).getDatanode(nodeID);
       if (node == null || !node.isAlive) {
         throw new IOException("ProcessReport from dead or unregistered node: "
                               + nodeID.getName());
@@ -3271,7 +3090,8 @@
                                          ) throws IOException {
     writeLock();
     try {
-      DatanodeDescriptor node = getDatanode(nodeID);
+      final DatanodeDescriptor node = blockManager.getDatanodeManager(
+          ).getDatanode(nodeID);
       if (node == null || !node.isAlive) {
         NameNode.stateChangeLog.warn("BLOCK* NameSystem.blockReceived: " + block
             + " is received from dead or unregistered node " + nodeID.getName());
@@ -3477,33 +3297,7 @@
                                           ArrayList<DatanodeDescriptor> dead) {
     readLock();
     try {
-      final List<DatanodeDescriptor> results = getBlockManager(
-          ).getDatanodeManager().getDatanodeListForReport(DatanodeReportType.ALL);    
-      for(Iterator<DatanodeDescriptor> it = results.iterator(); it.hasNext();) {
-        DatanodeDescriptor node = it.next();
-        if (isDatanodeDead(node))
-          dead.add(node);
-        else
-          live.add(node);
-      }
-    } finally {
-      readUnlock();
-    }
-  }
-
-  /**
-   * Prints information about all datanodes.
-   */
-  private void datanodeDump(PrintWriter out) {
-    readLock();
-    try {
-      synchronized (datanodeMap) {
-        out.println("Metasave: Number of datanodes: " + datanodeMap.size());
-        for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); it.hasNext();) {
-          DatanodeDescriptor node = it.next();
-          out.println(node.dumpDatanode());
-        }
-      }
+      getBlockManager().getDatanodeManager().fetchDatanodess(live, dead);
     } finally {
       readUnlock();
     }
@@ -3558,30 +3352,6 @@
     checkSuperuserPrivilege();
     getFSImage().finalizeUpgrade();
   }
-    
-    
-  /**
-   * Get data node by storage ID.
-   * 
-   * @param nodeID
-   * @return DatanodeDescriptor or null if the node is not found.
-   * @throws IOException
-   */
-  public DatanodeDescriptor getDatanode(DatanodeID nodeID
-      ) throws UnregisteredNodeException {
-    assert hasReadOrWriteLock();
-    UnregisteredNodeException e = null;
-    DatanodeDescriptor node = datanodeMap.get(nodeID.getStorageID());
-    if (node == null) 
-      return null;
-    if (!node.getName().equals(nodeID.getName())) {
-      e = new UnregisteredNodeException(nodeID, node);
-      NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
-                                    + e.getLocalizedMessage());
-      throw e;
-    }
-    return node;
-  }
 
   /**
    * SafeModeInfo contains information related to the safe mode.
@@ -4490,43 +4260,14 @@
   }
   
 
-  /**
-   * Number of live data nodes
-   * @return Number of live data nodes
-   */
   @Override // FSNamesystemMBean
   public int getNumLiveDataNodes() {
-    int numLive = 0;
-    synchronized (datanodeMap) {   
-      for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); 
-                                                               it.hasNext();) {
-        DatanodeDescriptor dn = it.next();
-        if (!isDatanodeDead(dn) ) {
-          numLive++;
-        }
-      }
-    }
-    return numLive;
+    return getBlockManager().getDatanodeManager().getNumLiveDataNodes();
   }
-  
 
-  /**
-   * Number of dead data nodes
-   * @return Number of dead data nodes
-   */
   @Override // FSNamesystemMBean
   public int getNumDeadDataNodes() {
-    int numDead = 0;
-    synchronized (datanodeMap) {   
-      for(Iterator<DatanodeDescriptor> it = datanodeMap.values().iterator(); 
-                                                               it.hasNext();) {
-        DatanodeDescriptor dn = it.next();
-        if (isDatanodeDead(dn) ) {
-          numDead++;
-        }
-      }
-    }
-    return numDead;
+    return getBlockManager().getDatanodeManager().getNumDeadDataNodes();
   }
 
   /**
@@ -4686,11 +4427,12 @@
     blockinfo.setNumBytes(newBlock.getNumBytes());
 
     // find the DatanodeDescriptor objects
+    final DatanodeManager dm = getBlockManager().getDatanodeManager();
     DatanodeDescriptor[] descriptors = null;
     if (newNodes.length > 0) {
       descriptors = new DatanodeDescriptor[newNodes.length];
       for(int i = 0; i < newNodes.length; i++) {
-        descriptors[i] = getDatanode(newNodes[i]);
+        descriptors[i] = dm.getDatanode(newNodes[i]);
       }
     }
     blockinfo.setExpectedLocations(descriptors);
@@ -4817,12 +4559,6 @@
     return blockManager.numCorruptReplicas(blk);
   }
 
-  /** Get a datanode descriptor given corresponding storageID */
-  public DatanodeDescriptor getDatanode(String nodeID) {
-    assert hasReadOrWriteLock();
-    return datanodeMap.get(nodeID);
-  }
-
   /**
    * Return a range of corrupt replica block ids. Up to numExpectedBlocks 
    * blocks starting at the next block after startingBlockId are returned
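 
A minimal illustrative sketch (not part of the patch) of the call pattern the FSNamesystem hunks above converge on: datanode descriptors are now resolved through the BlockManager's DatanodeManager rather than through FSNamesystem's removed getDatanode()/isDatanodeDead() helpers. The DatanodeLookupSketch class below is hypothetical; the accessors it uses are the ones that appear in the hunks.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

// Illustrative only: mirrors the refactored FSNamesystem call sites above.
class DatanodeLookupSketch {
  /** Resolve a descriptor the way the refactored code now does, under the namesystem read lock. */
  static DatanodeDescriptor lookup(FSNamesystem ns, DatanodeID id) throws IOException {
    ns.readLock();
    try {
      final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
      return dm.getDatanode(id);   // previously: ns.getDatanode(id)
    } finally {
      ns.readUnlock();
    }
  }
}
 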
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index 3af0f0a..00461e2 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -490,7 +490,7 @@
    * in this filesystem. */
   private void format(StorageDirectory sd) throws IOException {
    sd.clearDirectory(); // create current dir
-    sd.write();
+    writeProperties(sd);
     writeTransactionIdFile(sd, 0);
 
     LOG.info("Storage directory " + sd.getRoot()
@@ -533,10 +533,9 @@
   }
 
   @Override // Storage
-  protected void getFields(Properties props,
-                           StorageDirectory sd
-                           ) throws IOException {
-    super.getFields(props, sd);
+  protected void setFieldsFromProperties(
+      Properties props, StorageDirectory sd) throws IOException {
+    super.setFieldsFromProperties(props, sd);
     if (layoutVersion == 0) {
       throw new IOException("NameNode directory "
                             + sd.getRoot() + " is not formatted.");
@@ -592,10 +591,10 @@
    * @throws IOException
    */
   @Override // Storage
-  protected void setFields(Properties props,
+  protected void setPropertiesFromFields(Properties props,
                            StorageDirectory sd
                            ) throws IOException {
-    super.setFields(props, sd);
+    super.setPropertiesFromFields(props, sd);
     // Set blockpoolID in version with federation support
     if (LayoutVersion.supports(Feature.FEDERATION, layoutVersion)) {
       props.setProperty("blockpoolID", blockpoolID);
@@ -927,7 +926,7 @@
     while(sdit.hasNext()) {
       StorageDirectory sd = sdit.next();
       try {
-        Properties props = sd.readFrom(sd.getVersionFile());
+        Properties props = readPropertiesFile(sd.getVersionFile());
         cid = props.getProperty("clusterID");
         LOG.info("current cluster id for sd="+sd.getCurrentDir() + 
             ";lv=" + layoutVersion + ";cid=" + cid);
@@ -1027,7 +1026,7 @@
         FSImage.LOG.warn("Storage directory " + sd + " contains no VERSION file. Skipping...");
         continue;
       }
-      sd.read(); // sets layoutVersion
+      readProperties(sd); // sets layoutVersion
       minLayoutVersion = Math.min(minLayoutVersion, getLayoutVersion());
       maxLayoutVersion = Math.max(maxLayoutVersion, getLayoutVersion());
     }
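 
The NNStorage hunks above move property handling onto the renamed hooks (setFieldsFromProperties / setPropertiesFromFields) and the readProperties / writeProperties / readPropertiesFile helpers, instead of calling read()/write()/readFrom() on StorageDirectory. A rough caller-side sketch, assuming only the constructors and accessors shown in this and the later UpgradeUtilities/TestClusterId hunks; StoragePropsSketch and readClusterId are hypothetical names.

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;

// Illustrative only: caller-side view of the renamed Storage property helpers.
class StoragePropsSketch {
  /** Read the clusterID out of a name directory's VERSION file. */
  static String readClusterId(Configuration conf, File nameDir) throws IOException {
    NNStorage storage = new NNStorage(conf,
        Collections.<URI>emptyList(), Collections.<URI>emptyList());
    // StorageDirectory is constructed directly now, not as storage.new StorageDirectory(...).
    StorageDirectory sd = new StorageDirectory(nameDir);
    storage.readProperties(sd);                                   // was: sd.read()
    Properties props = Storage.readPropertiesFile(sd.getVersionFile()); // was: sd.readFrom(...)
    return props.getProperty("clusterID");
  }
}
 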
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 99e274b..7450afc 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -43,7 +43,7 @@
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -157,20 +157,20 @@
    * Following are nameservice specific keys.
    */
   public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
-    DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
-    DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
-    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
-    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
-    DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
-    DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-    DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
-    DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY
+    DFS_NAMENODE_RPC_ADDRESS_KEY,
+    DFS_NAMENODE_NAME_DIR_KEY,
+    DFS_NAMENODE_EDITS_DIR_KEY,
+    DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+    DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
+    DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+    DFS_NAMENODE_HTTP_ADDRESS_KEY,
+    DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+    DFS_NAMENODE_KEYTAB_FILE_KEY,
+    DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+    DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
+    DFS_NAMENODE_BACKUP_ADDRESS_KEY,
+    DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+    DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY
   };
   
   public long getProtocolVersion(String protocol, 
@@ -265,7 +265,7 @@
   public static void setServiceAddress(Configuration conf,
                                            String address) {
     LOG.info("Setting ADDRESS " + address);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, address);
+    conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, address);
   }
   
   /**
@@ -277,7 +277,7 @@
    */
   public static InetSocketAddress getServiceAddress(Configuration conf,
                                                         boolean fallback) {
-    String addr = conf.get(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+    String addr = conf.get(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
     if (addr == null || addr.isEmpty()) {
       return fallback ? getAddress(conf) : null;
     }
@@ -363,11 +363,11 @@
 
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
     return  NetUtils.createSocketAddr(
-        conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:50070"));
+        conf.get(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
   }
   
   protected void setHttpServerAddress(Configuration conf) {
-    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,
         getHostPortString(getHttpAddress()));
   }
 
@@ -392,8 +392,8 @@
    */
   void loginAsNameNodeUser(Configuration conf) throws IOException {
     InetSocketAddress socAddr = getRpcServerAddress(conf);
-    SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
-        DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, socAddr.getHostName());
+    SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
+        DFS_NAMENODE_USER_NAME_KEY, socAddr.getHostName());
   }
   
   /**
@@ -406,8 +406,8 @@
     UserGroupInformation.setConfiguration(conf);
     loginAsNameNodeUser(conf);
     int handlerCount = 
-      conf.getInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 
-                  DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT);
+      conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, 
+                  DFS_DATANODE_HANDLER_COUNT_DEFAULT);
 
     NameNode.initMetrics(conf, this.getRole());
     loadNamesystem(conf);
@@ -415,8 +415,8 @@
     InetSocketAddress dnSocketAddr = getServiceRpcServerAddress(conf);
     if (dnSocketAddr != null) {
       int serviceHandlerCount =
-        conf.getInt(DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
-                    DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
+        conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
+                    DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
       this.serviceRpcServer = RPC.getServer(NamenodeProtocols.class, this,
           dnSocketAddr.getHostName(), dnSocketAddr.getPort(), serviceHandlerCount,
           false, conf, namesystem.getDelegationTokenSecretManager());
@@ -494,7 +494,8 @@
     }
     startTrashEmptier(conf);
     
-    plugins = conf.getInstances("dfs.namenode.plugins", ServicePlugin.class);
+    plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
+        ServicePlugin.class);
     for (ServicePlugin p: plugins) {
       try {
         p.start(this);
@@ -1308,12 +1309,12 @@
   private static boolean format(Configuration conf,
                                 boolean isConfirmationNeeded)
       throws IOException {
-    if (!conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, 
-                         DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
-      throw new IOException("The option " + DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
+    if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, 
+                         DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
+      throw new IOException("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
                              + " is set to false for this filesystem, so it "
                              + "cannot be formatted. You will need to set "
-                             + DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter "
+                             + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter "
                              + "to true in order to format this filesystem");
     }
     
@@ -1456,11 +1457,11 @@
   }
 
   private static void setStartupOption(Configuration conf, StartupOption opt) {
-    conf.set("dfs.namenode.startup", opt.toString());
+    conf.set(DFS_NAMENODE_STARTUP_KEY, opt.toString());
   }
 
   static StartupOption getStartupOption(Configuration conf) {
-    return StartupOption.valueOf(conf.get("dfs.namenode.startup",
+    return StartupOption.valueOf(conf.get(DFS_NAMENODE_STARTUP_KEY,
                                           StartupOption.REGULAR.toString()));
   }
 
@@ -1552,10 +1553,10 @@
     
     DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
     
-    if (conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
+    if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(FSConstants.HDFS_URI_SCHEME + "://"
-          + conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
-      conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, defaultUri.toString());
+          + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
+      conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
     }
   }
     
diff --git a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 014c3eb..f126f17 100644
--- a/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -41,7 +41,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -182,8 +182,8 @@
   
   public static InetSocketAddress getHttpAddress(Configuration conf) {
     return NetUtils.createSocketAddr(conf.get(
-        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
+        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
   }
   
   /**
@@ -196,15 +196,12 @@
     infoBindAddress = infoSocAddr.getHostName();
     UserGroupInformation.setConfiguration(conf);
     if (UserGroupInformation.isSecurityEnabled()) {
-      SecurityUtil.login(conf, 
-          DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
-          DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY,
-          infoBindAddress);
+      SecurityUtil.login(conf, DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
+          DFS_SECONDARY_NAMENODE_USER_NAME_KEY, infoBindAddress);
     }
     // initiate Java VM metrics
     JvmMetrics.create("SecondaryNameNode",
-        conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
-        DefaultMetricsSystem.instance());
+        conf.get(DFS_METRICS_SESSION_ID_KEY), DefaultMetricsSystem.instance());
     
     // Create connection to the namenode.
     shouldRun = true;
@@ -226,13 +223,13 @@
 
     // Initialize other scheduling parameters from the configuration
     checkpointCheckPeriod = conf.getLong(
-        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,
-        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT);
+        DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,
+        DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT);
         
-    checkpointPeriod = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
-                                    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
-    checkpointTxnCount = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 
-                                  DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
+    checkpointPeriod = conf.getLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
+                                    DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
+    checkpointTxnCount = conf.getLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 
+                                  DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
     warnForDeprecatedConfigs(conf);
 
     // initialize the webserver for uploading files.
@@ -240,9 +237,9 @@
     UserGroupInformation httpUGI = 
       UserGroupInformation.loginUserFromKeytabAndReturnUGI(
           SecurityUtil.getServerPrincipal(conf
-              .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
+              .get(DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
               infoBindAddress),
-          conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
+          conf.get(DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
     try {
       infoServer = httpUGI.doAs(new PrivilegedExceptionAction<HttpServer>() {
         @Override
@@ -253,7 +250,7 @@
           int tmpInfoPort = infoSocAddr.getPort();
           infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
               tmpInfoPort == 0, conf, 
-              new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " ")));
+              new AccessControlList(conf.get(DFS_ADMIN, " ")));
           
           if(UserGroupInformation.isSecurityEnabled()) {
             System.setProperty("https.cipherSuites", 
@@ -286,7 +283,7 @@
       imagePort = infoPort;
     }
     
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort); 
+    conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort); 
     LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
     LOG.info("Secondary image servlet up at: " + infoBindAddress + ":" + imagePort);
     LOG.info("Checkpoint Period   :" + checkpointPeriod + " secs " +
@@ -301,7 +298,7 @@
       if (conf.get(key) != null) {
         LOG.warn("Configuration key " + key + " is deprecated! Ignoring..." +
             " Instead please specify a value for " +
-            DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY);
+            DFS_NAMENODE_CHECKPOINT_TXNS_KEY);
       }
     }
   }
@@ -796,7 +793,7 @@
         
         StorageState curState;
         try {
-          curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR);
+          curState = sd.analyzeStorage(HdfsConstants.StartupOption.REGULAR, storage);
           // sd is locked but not opened
           switch(curState) {
           case NON_EXISTENT:
@@ -810,7 +807,7 @@
             // (a) the VERSION file for each of the directories is the same,
             // and (b) when we connect to a NN, we can verify that the remote
             // node matches the same namespace that we ran on previously.
-            sd.read();
+            storage.readProperties(sd);
             break;
           default:  // recovery is possible
             sd.doRecover(curState);
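 
NameNode.java and SecondaryNameNode.java above both switch from fully qualified DFSConfigKeys references to static imports, which keeps the configuration look-ups short. A small sketch of that style, using keys that appear in the hunks above; CheckpointConfSketch itself is a hypothetical holder class.

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;

import org.apache.hadoop.conf.Configuration;

// Illustrative only: the static-import style the patch adopts for config keys.
class CheckpointConfSketch {
  static long checkpointPeriod(Configuration conf) {
    // The unqualified key names resolve through the static imports above.
    return conf.getLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,
                        DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
  }

  static long checkpointTxnCount(Configuration conf) {
    return conf.getLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
                        DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
  }
}
 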
diff --git a/hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java b/hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java
index bbd7727..61ab200 100644
--- a/hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java
+++ b/hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java
@@ -236,7 +236,7 @@
 
   private static void initLoggers() {
     ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) TestFiPipelines.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
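 
TestFiPipelines (and the test files changed below) stop referencing FSNamesystem.LOG directly and instead look the logger up by class, since the field is no longer visible to them. A standalone sketch of that pattern; TestLoggingSetup is hypothetical, while the classes and calls are the ones used in the test hunks.

import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.log4j.Level;

// Illustrative only: logger access without the package-private FSNamesystem.LOG field.
class TestLoggingSetup {
  static void enableNamesystemDebugLogging() {
    // Look up the commons-logging Log for FSNamesystem and unwrap the underlying log4j logger.
    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class))
        .getLogger().setLevel(Level.ALL);
  }
}
 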
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
index c6e6e5f..ec33f76 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -221,7 +222,9 @@
         .build();
       fail("Was able to start NN from 0.3.0 image");
     } catch (IOException ioe) {
-      assertTrue(ioe.toString().contains("Old layout version is 'too old'"));
+      if (!ioe.toString().contains("Old layout version is 'too old'")) {
+        throw ioe;
+      }
     }
   }
   
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java
index 4eedee4..b061f26 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java
@@ -21,6 +21,7 @@
 
 import junit.framework.TestCase;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -44,7 +45,7 @@
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
index 491a0b5..2a88f78 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
@@ -226,7 +226,7 @@
     writeConfigFile(excludeFile, nodes);
     cluster.getNamesystem(nnIndex).refreshNodes(conf);
     DatanodeInfo ret = NameNodeAdapter.getDatanode(
-        cluster.getNameNode(nnIndex), info[index]);
+        cluster.getNamesystem(nnIndex), info[index]);
     waitNodeState(ret, waitForState);
     return ret;
   }
@@ -466,7 +466,7 @@
       // Stop decommissioning and verify stats
       writeConfigFile(excludeFile, null);
       fsn.refreshNodes(conf);
-      DatanodeInfo ret = NameNodeAdapter.getDatanode(namenode, downnode);
+      DatanodeInfo ret = NameNodeAdapter.getDatanode(fsn, downnode);
       waitNodeState(ret, AdminStates.NORMAL);
       verifyStats(namenode, fsn, ret, false);
     }
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java
index 9733695..c63c4ec 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.log4j.Level;
 
@@ -49,7 +50,7 @@
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
index 1bc26ba..b6f6109 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
@@ -24,6 +24,7 @@
 import junit.framework.Test;
 import junit.framework.TestSuite;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -46,7 +47,7 @@
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java
index 21f55a6..7a013d4 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -68,7 +68,7 @@
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileConcurrentReader.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
index a09b17e..577765e 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -54,7 +55,7 @@
 
   {
     ((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
 
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
index 8189599..5f67352 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -27,6 +27,7 @@
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -48,7 +49,7 @@
 public class TestFileCorruption extends TestCase {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
   }
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
index fa69017..01cab9e 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -25,6 +25,7 @@
 import java.net.InetSocketAddress;
 import java.util.EnumSet;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
@@ -59,7 +60,7 @@
   {
     //((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
 
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java
index 1e034a0..3516cf6 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -40,7 +41,7 @@
   {
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
   }
 
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java
index 8823f10..b6ee7025 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,7 +34,7 @@
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }
 
   public void testFileCreationDeleteParent() throws IOException {
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
index 038f0fd..ad022b3 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
@@ -21,7 +21,7 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 
 /**
  * Test empty file creation.
@@ -40,7 +40,7 @@
     Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
       public void uncaughtException(Thread t, Throwable e) {
         if (e instanceof ConcurrentModificationException) {
-          FSNamesystem.LOG.error("t=" + t, e);
+          LeaseManager.LOG.error("t=" + t, e);
           isConcurrentModificationException = true;
         }
       }
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
index 6b9e19c..badd2fb 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
@@ -26,6 +26,7 @@
 import java.io.IOException;
 import java.util.Random;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -51,7 +52,7 @@
  */
 public class TestFileStatus {
   {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)FileSystem.LOG).getLogger().setLevel(Level.ALL);
   }
 
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index 69b0a47..1d3dd52 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -57,7 +57,7 @@
   {
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }
 
   static final private long BLOCK_SIZE = 1024;
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
index 2a5d8ae..3174f38 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
@@ -24,6 +24,7 @@
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -53,7 +54,7 @@
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
index 4b97136..3a3dde8 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
@@ -160,7 +160,7 @@
 
   private static void initLoggers() {
     ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
index 823c4c6..3bffd20 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
@@ -21,6 +21,7 @@
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -38,7 +39,7 @@
 /** Test reading from hdfs while a file is being written. */
 public class TestReadWhileWriting {
   {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
   }
 
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
index 03e20d1..54d515a 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -33,7 +34,7 @@
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }
 
   //TODO: un-comment checkFullFile once the lease recovery is done
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
index c96e20d..5771b22 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -409,18 +409,15 @@
    */
   public static File[] createNameNodeVersionFile(Configuration conf,
       File[] parent, StorageInfo version, String bpid) throws IOException {
-    Storage storage = null;
-    File[] versionFiles = new File[parent.length];
-    for (int i = 0; i < parent.length; i++) {
-      File versionFile = new File(parent[i], "VERSION");
-      FileUtil.fullyDelete(versionFile);
-      storage = new NNStorage(conf, 
+    Storage storage = new NNStorage(conf, 
                               Collections.<URI>emptyList(), 
                               Collections.<URI>emptyList());
-      storage.setStorageInfo(version);
-      StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
-      sd.write(versionFile);
-      versionFiles[i] = versionFile;
+    storage.setStorageInfo(version);
+    File[] versionFiles = new File[parent.length];
+    for (int i = 0; i < parent.length; i++) {
+      versionFiles[i] = new File(parent[i], "VERSION");
+      StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
+      storage.writeProperties(versionFiles[i], sd);
     }
     return versionFiles;
   }
@@ -453,14 +450,13 @@
    */
   public static void createDataNodeVersionFile(File[] parent,
       StorageInfo version, String bpid, String bpidToWrite) throws IOException {
-    DataStorage storage = null;
+    DataStorage storage = new DataStorage(version, "doNotCare");
+
     File[] versionFiles = new File[parent.length];
     for (int i = 0; i < parent.length; i++) {
       File versionFile = new File(parent[i], "VERSION");
-      FileUtil.fullyDelete(versionFile);
-      storage = new DataStorage(version, "doNotCare");
-      StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
-      sd.write(versionFile);
+      StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
+      storage.writeProperties(versionFile, sd);
       versionFiles[i] = versionFile;
       File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
       createBlockPoolVersionFile(bpDir, version, bpidToWrite);
@@ -475,9 +471,8 @@
       BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(version,
           bpid);
       File versionFile = new File(bpCurDir, "VERSION");
-      FileUtil.fullyDelete(versionFile);
-      StorageDirectory sd = bpStorage.new StorageDirectory(bpDir);
-      sd.write(versionFile);
+      StorageDirectory sd = new StorageDirectory(bpDir);
+      bpStorage.writeProperties(versionFile, sd);
     }
   }
   
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index a78292e..9ee296c 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -25,6 +25,7 @@
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -56,7 +57,7 @@
     ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
 //    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
   }
 
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index 592705a..292f7d8 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -25,9 +25,30 @@
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.Daemon;
 
 public class BlockManagerTestUtil {
+
+  /** @return the datanode descriptor for the given storageID. */
+  public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
+      final String storageID) {
+    ns.readLock();
+    try {
+      return ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
+    } finally {
+      ns.readUnlock();
+    }
+  }
+
+
+  /**
+   * Refresh block queue counts on the name-node.
+   */
+  public static void updateState(final BlockManager blockManager) {
+    blockManager.updateState();
+  }
+
   /**
    * @return a tuple of the replica state (number racks, number live
    * replicas, and number needed replicas) for the given block.
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
similarity index 67%
rename from hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java
rename to hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
index 0299cc5..3e32a95 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import junit.framework.TestCase;
 
@@ -23,8 +23,10 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 
 /**
  * Test if FSNamesystem handles heartbeat right
@@ -41,6 +43,8 @@
     try {
       cluster.waitActive();
       final FSNamesystem namesystem = cluster.getNamesystem();
+      final BlockManager bm = namesystem.getBlockManager();
+      final int blockInvalidateLimit = bm.getDatanodeManager().blockInvalidateLimit;
       DatanodeDescriptor[] nodes =
         namesystem.heartbeats.toArray(new DatanodeDescriptor[NUM_OF_DATANODES]);
       assertEquals(nodes.length, NUM_OF_DATANODES);
@@ -48,26 +52,25 @@
       namesystem.writeLock();
       try {
         for (int i=0; i<nodes.length; i++) {
-          for(int j=0; j<3*namesystem.blockInvalidateLimit+1; j++) {
-            Block block = new Block(i*(namesystem.blockInvalidateLimit+1)+j, 0, 
+          for(int j=0; j<3*blockInvalidateLimit+1; j++) {
+            Block block = new Block(i*(blockInvalidateLimit+1)+j, 0, 
                 GenerationStamp.FIRST_VALID_STAMP);
-            namesystem.getBlockManager().addToInvalidates(block, nodes[i]);
+            bm.addToInvalidates(block, nodes[i]);
           }
         }
         
-        assertEquals(namesystem.blockInvalidateLimit*NUM_OF_DATANODES, 
-            namesystem.getBlockManager().computeInvalidateWork(NUM_OF_DATANODES+1));
-        assertEquals(namesystem.blockInvalidateLimit*NUM_OF_DATANODES, 
-            namesystem.getBlockManager().computeInvalidateWork(NUM_OF_DATANODES));
-        assertEquals(namesystem.blockInvalidateLimit*(NUM_OF_DATANODES-1), 
-            namesystem.getBlockManager().computeInvalidateWork(NUM_OF_DATANODES-1));
-        int workCount = namesystem.getBlockManager().computeInvalidateWork(1);
+        assertEquals(blockInvalidateLimit*NUM_OF_DATANODES, 
+            bm.computeInvalidateWork(NUM_OF_DATANODES+1));
+        assertEquals(blockInvalidateLimit*NUM_OF_DATANODES, 
+            bm.computeInvalidateWork(NUM_OF_DATANODES));
+        assertEquals(blockInvalidateLimit*(NUM_OF_DATANODES-1), 
+            bm.computeInvalidateWork(NUM_OF_DATANODES-1));
+        int workCount = bm.computeInvalidateWork(1);
         if (workCount == 1) {
-          assertEquals(namesystem.blockInvalidateLimit+1, 
-              namesystem.getBlockManager().computeInvalidateWork(2));
+          assertEquals(blockInvalidateLimit+1, bm.computeInvalidateWork(2));
         } else {
-          assertEquals(workCount, namesystem.blockInvalidateLimit);
-          assertEquals(2, namesystem.getBlockManager().computeInvalidateWork(2));
+          assertEquals(workCount, blockInvalidateLimit);
+          assertEquals(2, bm.computeInvalidateWork(2));
         }
       } finally {
         namesystem.writeUnlock();
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
index aed48e4..9b2b322 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -54,14 +55,9 @@
       final String poolId = namesystem.getBlockPoolId();
       final DatanodeRegistration nodeReg = 
         DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
-        
-      namesystem.readLock();
-      DatanodeDescriptor dd;
-      try {
-        dd = namesystem.getDatanode(nodeReg);
-      } finally {
-        namesystem.readUnlock();
-      }
+
+
+      final DatanodeDescriptor dd = NameNodeAdapter.getDatanode(namesystem, nodeReg);
       
       final int REMAINING_BLOCKS = 1;
       final int MAX_REPLICATE_LIMIT = 
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
index 5f281ae..33e102d 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
@@ -596,7 +596,7 @@
   }
 
   private void printStats() {
-    NameNodeAdapter.refreshBlockCounts(cluster.getNameNode());
+    BlockManagerTestUtil.updateState(cluster.getNamesystem().getBlockManager());
     if(LOG.isDebugEnabled()) {
       LOG.debug("Missing " + cluster.getNamesystem().getMissingBlocksCount());
       LOG.debug("Corrupted " + cluster.getNamesystem().getCorruptReplicaBlocks());
@@ -667,7 +667,7 @@
 
   private static void initLoggers() {
     ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) TestBlockReport.LOG).getLogger().setLevel(Level.ALL);
   }
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index 2dbe281..919dd71 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -46,14 +46,6 @@
   }
 
   /**
-   * Refresh block queue counts on the name-node.
-   * @param namenode to proxy the invocation to
-   */
-  public static void refreshBlockCounts(NameNode namenode) {
-    namenode.getNamesystem().getBlockManager().updateState();
-  }
-
-  /**
    * Get the internal RPC server instance.
    * @return rpc server
    */
@@ -68,12 +60,11 @@
   /**
    * Return the datanode descriptor for the given datanode.
    */
-  public static DatanodeDescriptor getDatanode(NameNode namenode,
+  public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
       DatanodeID id) throws IOException {
-    FSNamesystem ns = namenode.getNamesystem();
     ns.readLock();
     try {
-      return ns.getDatanode(id);
+      return ns.getBlockManager().getDatanodeManager().getDatanode(id);
     } finally {
       ns.readUnlock();
     }
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
index 0c67e41..94b733c 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.junit.After;
 import org.junit.Before;
@@ -54,7 +55,7 @@
     Iterator<StorageDirectory> sdit = 
       fsImage.getStorage().dirIterator(NNStorage.NameNodeDirType.IMAGE);
     StorageDirectory sd = sdit.next();
-    Properties props = sd.readFrom(sd.getVersionFile());
+    Properties props = Storage.readPropertiesFile(sd.getVersionFile());
     String cid = props.getProperty("clusterID");
     LOG.info("successfully formated : sd="+sd.getCurrentDir() + ";cid="+cid);
     return cid;
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index 0c8ed4c..277000d 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
@@ -61,13 +62,8 @@
     FSNamesystem namesystem = cluster.getNamesystem();
     String state = alive ? "alive" : "dead";
     while (System.currentTimeMillis() < stopTime) {
-      namesystem.readLock();
-      DatanodeDescriptor dd;
-      try {
-        dd = namesystem.getDatanode(nodeID);
-      } finally {
-        namesystem.readUnlock();
-      }
+      final DatanodeDescriptor dd = BlockManagerTestUtil.getDatanode(
+          namesystem, nodeID);
       if (dd.isAlive == alive) {
         LOG.info("datanode " + nodeID + " is " + state);
         return;
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
index ba240fb..8113324 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
@@ -87,18 +87,13 @@
     assertTrue(doAnEdit());
     // Invalidate both edits journals.
     invalidateEditsDirAtIndex(0, true);
-    EditLogOutputStream elos = invalidateEditsDirAtIndex(1, true);
+    invalidateEditsDirAtIndex(1, true);
     // Make sure runtime.exit(...) hasn't been called at all yet.
     assertExitInvocations(0);
     assertTrue(doAnEdit());
     // The previous edit could not be synced to any persistent storage, should
     // have halted the NN.
     assertExitInvocations(1);
-    // Restore an edits journal to working order.
-    restoreEditsDirAtIndex(1, elos);
-    assertTrue(doAnEdit());
-    // Make sure we didn't make another call to runtime.exit(...).
-    assertExitInvocations(1);
   }
   
   @Test
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
new file mode 100644
index 0000000..5e828b6
--- /dev/null
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.junit.Test;
+
+public class TestEditsDoubleBuffer {
+  @Test
+  public void testDoubleBuffer() throws IOException {
+    EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
+    
+    assertTrue(buf.isFlushed());
+    byte[] data = new byte[100];
+    buf.writeRaw(data, 0, data.length);
+    assertEquals("Should count new data correctly",
+        data.length, buf.countBufferedBytes());
+
+    assertTrue("Writing to current buffer should not affect flush state",
+        buf.isFlushed());
+
+    // Swap the buffers
+    buf.setReadyToFlush();
+    assertEquals("Swapping buffers should still count buffered bytes",
+        data.length, buf.countBufferedBytes());
+    assertFalse(buf.isFlushed());
+ 
+    // Flush to a stream
+    DataOutputBuffer outBuf = new DataOutputBuffer();
+    buf.flushTo(outBuf);
+    assertEquals(data.length, outBuf.getLength());
+    assertTrue(buf.isFlushed());
+    assertEquals(0, buf.countBufferedBytes());
+    
+    // Write some more
+    buf.writeRaw(data, 0, data.length);
+    assertEquals("Should count new data correctly",
+        data.length, buf.countBufferedBytes());
+    buf.setReadyToFlush();
+    buf.flushTo(outBuf);
+    
+    assertEquals(data.length * 2, outBuf.getLength());
+    
+    assertEquals(0, buf.countBufferedBytes());
+
+    outBuf.close();
+  }
+  
+  @Test
+  public void shouldFailToCloseWhenUnflushed() throws IOException {
+    EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
+    buf.writeRaw(new byte[1], 0, 1);
+    try {
+      buf.close();
+      fail("Did not fail to close with unflushed data");
+    } catch (IOException ioe) {
+      if (!ioe.toString().contains("still to be flushed")) {
+        throw ioe;
+      }
+    }
+  }
+}
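
Note on the new test above: EditsDoubleBuffer follows the usual double-buffering pattern, where edits accumulate in one in-memory buffer while the previously filled buffer is drained to the underlying stream, and setReadyToFlush() swaps the two. The sketch below is purely illustrative (ToyDoubleBuffer and its fields are invented names, not the real class); it only mirrors the calls the test exercises: writeRaw, setReadyToFlush, flushTo, countBufferedBytes, isFlushed, and the close() check for unflushed data.

// Hypothetical sketch of the double-buffer behavior the test asserts.
// Not the actual EditsDoubleBuffer implementation.
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

class ToyDoubleBuffer {
  private ByteArrayOutputStream current;      // edits are appended here
  private ByteArrayOutputStream readyToFlush; // swapped-out buffer being synced

  ToyDoubleBuffer(int initSize) {
    current = new ByteArrayOutputStream(initSize);
    readyToFlush = new ByteArrayOutputStream(initSize);
  }

  void writeRaw(byte[] data, int off, int len) {
    // Writers never touch the buffer that is being flushed.
    current.write(data, off, len);
  }

  void setReadyToFlush() {
    assert isFlushed() : "previous data has not been flushed yet";
    ByteArrayOutputStream tmp = readyToFlush; // swap the two buffers
    readyToFlush = current;
    current = tmp;
  }

  void flushTo(OutputStream out) throws IOException {
    readyToFlush.writeTo(out);                // drain the swapped-out buffer
    readyToFlush.reset();
  }

  int countBufferedBytes() {
    return current.size() + readyToFlush.size();
  }

  boolean isFlushed() {
    return readyToFlush.size() == 0;
  }

  void close() throws IOException {
    if (countBufferedBytes() > 0) {
      throw new IOException(countBufferedBytes() + " bytes still to be flushed");
    }
  }
}

Read against that sketch, the assertions in testDoubleBuffer follow directly: bytes written after a swap are counted together with the not-yet-flushed ones, while isFlushed() reflects only the buffer that was handed off for syncing.
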
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
index b98c227..c68f7ea 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
@@ -42,7 +42,6 @@
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.server.common.StorageAdapter;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.log4j.Level;
@@ -148,10 +147,8 @@
       break;
     case WRITE_STORAGE_ONE:
      // The spy throws an exception on one particular storage directory
-      StorageDirectory dir = StorageAdapter.spyOnStorageDirectory(
-          storage, 1);
-      doThrow(new RuntimeException("Injected"))
-        .when(dir).write();
+      doAnswer(new FaultySaveImage(true))
+        .when(spyStorage).writeProperties((StorageDirectory)anyObject());
       // TODO: unfortunately this fails -- should be improved.
       // See HDFS-2173.
       shouldFail = true;
diff --git a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
index 93f58da..1b51a18 100644
--- a/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
+++ b/hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
@@ -131,8 +131,7 @@
           EditLogOutputStream mockStream = spy(j.getCurrentStream());
           j.setCurrentStreamForTests(mockStream);
           doThrow(new IOException("Injected fault: write")).
-            when(mockStream).write(Mockito.anyByte(),
-                Mockito.anyLong(), (Writable[]) Mockito.anyVararg());
+            when(mockStream).write(Mockito.<FSEditLogOp>anyObject());
         }
       }
     }
diff --git a/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java b/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
index d6c6e61..ac36029 100644
--- a/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
+++ b/hdfs/src/test/unit/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
@@ -84,7 +84,7 @@
     new NamespaceInfo(1,CLUSTER_ID, POOL_ID, 2, 3);
 
   static {
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LOG).getLogger().setLevel(Level.ALL);
   }