Merge from trunk to branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1619018 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/BUILDING.txt b/BUILDING.txt
index 59eb447..3940a98 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -81,6 +81,27 @@
     the final tar file. This option requires that -Dsnappy.lib is also given,
     and it ignores the -Dsnappy.prefix option.
 
+ OpenSSL build options:
+
+   OpenSSL includes a crypto library that can be utilized by the native code.
+   It is currently an optional component, meaning that Hadoop can be built with
+   or without this dependency.
+
+  * Use -Drequire.openssl to fail the build if libcrypto.so is not found.
+    If this option is not specified and the openssl library is missing,
+    we silently build a version of libhadoop.so that cannot make use of
+    openssl. This option is recommended if you plan on making use of openssl 
+    and want to get more repeatable builds.
+  * Use -Dopenssl.prefix to specify a nonstandard location for the libcrypto
+    header files and library files. You do not need this option if you have
+    installed openssl using a package manager.
+  * Use -Dopenssl.lib to specify a nonstandard location for the libcrypto library
+    files. Similarly to openssl.prefix, you do not need this option if you have
+    installed openssl using a package manager.
+  * Use -Dbundle.openssl to copy the contents of the openssl.lib directory into
+    the final tar file. This option requires that -Dopenssl.lib is also given,
+    and it ignores the -Dopenssl.prefix option.
+
    Tests options:
 
   * Use -DskipTests to skip tests when running the following Maven goals:
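
For illustration, a native build that exercises the new OpenSSL options above
might be invoked as follows. This is a hypothetical command line, not part of
the patch: the dist/native profile, -DskipTests and -Dtar come from the
standard Hadoop build instructions, and the prefix path is only an example.

  $ mvn package -Pdist,native -DskipTests -Dtar -Drequire.openssl -Dopenssl.prefix=/usr/local/ssl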
diff --git a/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt
new file mode 100644
index 0000000..d036e71
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/CHANGES-fs-encryption.txt
@@ -0,0 +1,61 @@
+Hadoop Common Change Log for HDFS-6134 and HADOOP-10150
+
+fs-encryption (Unreleased)
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+    HADOOP-10734. Implement high-performance secure random number sources.
+    (Yi Liu via Colin Patrick McCabe)
+
+  IMPROVEMENTS
+
+    HADOOP-10603. Crypto input and output streams implementing Hadoop stream
+    interfaces. (Yi Liu and Charles Lamb)
+
+    HADOOP-10628. Javadoc and few code style improvement for Crypto
+    input and output streams. (Yi Liu via clamb)
+
+    HADOOP-10632. Minor improvements to Crypto input and output streams. 
+    (Yi Liu)
+
+    HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu)
+
+    HADOOP-10653. Add a new constructor for CryptoInputStream that 
+    receives current position of wrapped stream. (Yi Liu)
+
+    HADOOP-10662. NullPointerException in CryptoInputStream while wrapped
+    stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu)
+
+    HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[]. 
+    (wang via yliu)
+
+    HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL. 
+    (Yi Liu via cmccabe)
+
+    HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name
+    format. (Yi Liu)
+
+    HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to
+    JCE if non native support. (Yi Liu)
+
+    HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old
+    openssl versions (cmccabe)
+
+    HADOOP-10853. Refactor get instance of CryptoCodec and support create via
+    algorithm/mode/padding. (Yi Liu)
+
+    HADOOP-10919. Copy command should preserve raw.* namespace
+    extended attributes. (clamb)
+
+    HDFS-6873. Constants in CommandWithDestination should be static. (clamb)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe)
+
+    HADOOP-10886. CryptoCodec#getCodecclasses throws NPE when configurations not 
+    loaded. (umamahesh)
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index fe8aba1..09f1c5a 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -499,6 +499,10 @@
         <snappy.lib></snappy.lib>
         <snappy.include></snappy.include>
         <require.snappy>false</require.snappy>
+        <openssl.prefix></openssl.prefix>
+        <openssl.lib></openssl.lib>
+        <openssl.include></openssl.include>
+        <require.openssl>false</require.openssl>
       </properties>
       <build>
         <plugins>
@@ -548,6 +552,8 @@
                     <javahClassName>org.apache.hadoop.io.compress.snappy.SnappyDecompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Compressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.crypto.OpensslCipher</javahClassName>
+                    <javahClassName>org.apache.hadoop.crypto.random.OpensslSecureRandom</javahClassName>
                     <javahClassName>org.apache.hadoop.util.NativeCrc32</javahClassName>
                     <javahClassName>org.apache.hadoop.net.unix.DomainSocket</javahClassName>
                     <javahClassName>org.apache.hadoop.net.unix.DomainSocketWatcher</javahClassName>
@@ -568,7 +574,7 @@
                 <configuration>
                   <target>
                     <exec executable="cmake" dir="${project.build.directory}/native" failonerror="true">
-                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_BZIP2=${require.bzip2} -DREQUIRE_SNAPPY=${require.snappy} -DCUSTOM_SNAPPY_PREFIX=${snappy.prefix} -DCUSTOM_SNAPPY_LIB=${snappy.lib} -DCUSTOM_SNAPPY_INCLUDE=${snappy.include}"/>
+                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_BZIP2=${require.bzip2} -DREQUIRE_SNAPPY=${require.snappy} -DCUSTOM_SNAPPY_PREFIX=${snappy.prefix} -DCUSTOM_SNAPPY_LIB=${snappy.lib} -DCUSTOM_SNAPPY_INCLUDE=${snappy.include} -DREQUIRE_OPENSSL=${require.openssl} -DCUSTOM_OPENSSL_PREFIX=${openssl.prefix} -DCUSTOM_OPENSSL_LIB=${openssl.lib} -DCUSTOM_OPENSSL_INCLUDE=${openssl.include}"/>
                     </exec>
                     <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
                       <arg line="VERBOSE=1"/>
@@ -612,6 +618,11 @@
         <snappy.include></snappy.include>
         <require.snappy>false</require.snappy>
         <bundle.snappy.in.bin>true</bundle.snappy.in.bin>
+        <openssl.prefix></openssl.prefix>
+        <openssl.lib></openssl.lib>
+        <openssl.include></openssl.include>
+        <require.openssl>false</require.openssl>
+        <bundle.openssl.in.bin>true</bundle.openssl.in.bin>
       </properties>
       <build>
         <plugins>
@@ -657,6 +668,8 @@
                     <javahClassName>org.apache.hadoop.io.compress.snappy.SnappyDecompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Compressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.crypto.OpensslCipher</javahClassName>
+                    <javahClassName>org.apache.hadoop.crypto.random.OpensslSecureRandom</javahClassName>
                     <javahClassName>org.apache.hadoop.util.NativeCrc32</javahClassName>
                   </javahClassNames>
                   <javahOutputDirectory>${project.build.directory}/native/javah</javahOutputDirectory>
@@ -701,6 +714,10 @@
                     <argument>/p:CustomSnappyLib=${snappy.lib}</argument>
                     <argument>/p:CustomSnappyInclude=${snappy.include}</argument>
                     <argument>/p:RequireSnappy=${require.snappy}</argument>
+                    <argument>/p:CustomOpensslPrefix=${openssl.prefix}</argument>
+                    <argument>/p:CustomOpensslLib=${openssl.lib}</argument>
+                    <argument>/p:CustomOpensslInclude=${openssl.include}</argument>
+                    <argument>/p:RequireOpenssl=${require.openssl}</argument>
                   </arguments>
                 </configuration>
               </execution>
diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
index dec63c4..84c27e5 100644
--- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt
+++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
@@ -145,6 +145,38 @@
     ENDIF(REQUIRE_SNAPPY)
 endif (SNAPPY_LIBRARY AND SNAPPY_INCLUDE_DIR)
 
+SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES CMAKE_FIND_LIBRARY_SUFFIXES)
+set_find_shared_library_version("1.0.0")
+SET(OPENSSL_NAME "crypto")
+IF(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
+    SET(OPENSSL_NAME "eay32")
+ENDIF()
+find_library(OPENSSL_LIBRARY
+    NAMES ${OPENSSL_NAME}
+    PATHS ${CUSTOM_OPENSSL_PREFIX} ${CUSTOM_OPENSSL_PREFIX}/lib
+          ${CUSTOM_OPENSSL_PREFIX}/lib64 ${CUSTOM_OPENSSL_LIB} NO_DEFAULT_PATH)
+find_library(OPENSSL_LIBRARY
+    NAMES ${OPENSSL_NAME})
+SET(CMAKE_FIND_LIBRARY_SUFFIXES STORED_CMAKE_FIND_LIBRARY_SUFFIXES)
+find_path(OPENSSL_INCLUDE_DIR 
+    NAMES openssl/evp.h
+    PATHS ${CUSTOM_OPENSSL_PREFIX} ${CUSTOM_OPENSSL_PREFIX}/include
+          ${CUSTOM_OPENSSL_INCLUDE} NO_DEFAULT_PATH)
+find_path(OPENSSL_INCLUDE_DIR 
+    NAMES openssl/evp.h)
+if (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
+    GET_FILENAME_COMPONENT(HADOOP_OPENSSL_LIBRARY ${OPENSSL_LIBRARY} NAME)
+    SET(OPENSSL_SOURCE_FILES
+        "${D}/crypto/OpensslCipher.c"
+        "${D}/crypto/random/OpensslSecureRandom.c")
+else (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
+    SET(OPENSSL_INCLUDE_DIR "")
+    SET(OPENSSL_SOURCE_FILES "")
+    IF(REQUIRE_OPENSSL)
+        MESSAGE(FATAL_ERROR "Required openssl library could not be found.  OPENSSL_LIBRARY=${OPENSSL_LIBRARY}, OPENSSL_INCLUDE_DIR=${OPENSSL_INCLUDE_DIR}, CUSTOM_OPENSSL_INCLUDE_DIR=${CUSTOM_OPENSSL_INCLUDE_DIR}, CUSTOM_OPENSSL_PREFIX=${CUSTOM_OPENSSL_PREFIX}, CUSTOM_OPENSSL_INCLUDE=${CUSTOM_OPENSSL_INCLUDE}")
+    ENDIF(REQUIRE_OPENSSL)
+endif (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
+
 include_directories(
     ${GENERATED_JAVAH}
     main/native/src
@@ -155,6 +187,7 @@
     ${ZLIB_INCLUDE_DIRS}
     ${BZIP2_INCLUDE_DIR}
     ${SNAPPY_INCLUDE_DIR}
+    ${OPENSSL_INCLUDE_DIR}
     ${D}/util
 )
 CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
@@ -172,6 +205,7 @@
     ${D}/io/compress/lz4/lz4.c
     ${D}/io/compress/lz4/lz4hc.c
     ${SNAPPY_SOURCE_FILES}
+    ${OPENSSL_SOURCE_FILES}
     ${D}/io/compress/zlib/ZlibCompressor.c
     ${D}/io/compress/zlib/ZlibDecompressor.c
     ${BZIP2_SOURCE_FILES}
diff --git a/hadoop-common-project/hadoop-common/src/config.h.cmake b/hadoop-common-project/hadoop-common/src/config.h.cmake
index 020017c..d71271d 100644
--- a/hadoop-common-project/hadoop-common/src/config.h.cmake
+++ b/hadoop-common-project/hadoop-common/src/config.h.cmake
@@ -21,6 +21,7 @@
 #cmakedefine HADOOP_ZLIB_LIBRARY "@HADOOP_ZLIB_LIBRARY@"
 #cmakedefine HADOOP_BZIP2_LIBRARY "@HADOOP_BZIP2_LIBRARY@"
 #cmakedefine HADOOP_SNAPPY_LIBRARY "@HADOOP_SNAPPY_LIBRARY@"
+#cmakedefine HADOOP_OPENSSL_LIBRARY "@HADOOP_OPENSSL_LIBRARY@"
 #cmakedefine HAVE_SYNC_FILE_RANGE
 #cmakedefine HAVE_POSIX_FADVISE
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java
new file mode 100644
index 0000000..8f8bc66
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import com.google.common.base.Preconditions;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class AesCtrCryptoCodec extends CryptoCodec {
+
+  protected static final CipherSuite SUITE = CipherSuite.AES_CTR_NOPADDING;
+
+  /**
+   * For AES, the algorithm block size is fixed at 128 bits.
+   * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
+   */
+  private static final int AES_BLOCK_SIZE = SUITE.getAlgorithmBlockSize();
+  private static final int CTR_OFFSET = 8;
+
+  @Override
+  public CipherSuite getCipherSuite() {
+    return SUITE;
+  }
+  
+  /**
+   * The IV is produced by adding the counter to the initial IV. The IV
+   * length must be the same as {@link #AES_BLOCK_SIZE}.
+   */
+  @Override
+  public void calculateIV(byte[] initIV, long counter, byte[] IV) {
+    Preconditions.checkArgument(initIV.length == AES_BLOCK_SIZE);
+    Preconditions.checkArgument(IV.length == AES_BLOCK_SIZE);
+    
+    System.arraycopy(initIV, 0, IV, 0, CTR_OFFSET);
+    long l = 0;
+    for (int i = 0; i < 8; i++) {
+      l = ((l << 8) | (initIV[CTR_OFFSET + i] & 0xff));
+    }
+    l += counter;
+    IV[CTR_OFFSET + 0] = (byte) (l >>> 56);
+    IV[CTR_OFFSET + 1] = (byte) (l >>> 48);
+    IV[CTR_OFFSET + 2] = (byte) (l >>> 40);
+    IV[CTR_OFFSET + 3] = (byte) (l >>> 32);
+    IV[CTR_OFFSET + 4] = (byte) (l >>> 24);
+    IV[CTR_OFFSET + 5] = (byte) (l >>> 16);
+    IV[CTR_OFFSET + 6] = (byte) (l >>> 8);
+    IV[CTR_OFFSET + 7] = (byte) (l);
+  }
+}
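
The counter arithmetic in calculateIV above can be exercised in isolation. The
following standalone sketch is illustrative and not taken from the patch (the
class name is made up); it reproduces the same add-counter-to-low-64-bits
logic, writing the high 8 bytes of the initial IV through unchanged.

    public class CalculateIvSketch {
      static void calculateIV(byte[] initIV, long counter, byte[] IV) {
        final int CTR_OFFSET = 8;
        // Copy the high 8 bytes of the initial IV unchanged.
        System.arraycopy(initIV, 0, IV, 0, CTR_OFFSET);
        // Interpret the low 8 bytes as a big-endian long and add the counter.
        long l = 0;
        for (int i = 0; i < 8; i++) {
          l = (l << 8) | (initIV[CTR_OFFSET + i] & 0xff);
        }
        l += counter;
        // Write the sum back in big-endian order.
        for (int i = 0; i < 8; i++) {
          IV[CTR_OFFSET + i] = (byte) (l >>> (56 - 8 * i));
        }
      }

      public static void main(String[] args) {
        byte[] initIV = new byte[16];  // all-zero initial IV for the demo
        byte[] iv = new byte[16];
        calculateIV(initIV, 3, iv);    // block counter 3
        System.out.println(iv[15]);    // prints 3; iv[8..14] remain 0
      }
    }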
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java
new file mode 100644
index 0000000..9962b38
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.crypto;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Defines properties of a CipherSuite. Modeled after the ciphers in
+ * {@link javax.crypto.Cipher}.
+ */
+@InterfaceAudience.Private
+public enum CipherSuite {
+  UNKNOWN("Unknown", 0),
+  AES_CTR_NOPADDING("AES/CTR/NoPadding", 16);
+
+  private final String name;
+  private final int algoBlockSize;
+
+  private Integer unknownValue = null;
+
+  CipherSuite(String name, int algoBlockSize) {
+    this.name = name;
+    this.algoBlockSize = algoBlockSize;
+  }
+
+  public void setUnknownValue(int unknown) {
+    this.unknownValue = unknown;
+  }
+
+  public int getUnknownValue() {
+    return unknownValue;
+  }
+
+  /**
+   * @return name of cipher suite, as in {@link javax.crypto.Cipher}
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * @return size of an algorithm block in bytes
+   */
+  public int getAlgorithmBlockSize() {
+    return algoBlockSize;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder("{");
+    builder.append("name: " + name);
+    builder.append(", algorithmBlockSize: " + algoBlockSize);
+    if (unknownValue != null) {
+      builder.append(", unknownValue: " + unknownValue);
+    }
+    builder.append("}");
+    return builder.toString();
+  }
+  
+  public static void checkName(String name) {
+    CipherSuite[] suites = CipherSuite.values();
+    for (CipherSuite suite : suites) {
+      if (suite.getName().equals(name)) {
+        return;
+      }
+    }
+    throw new IllegalArgumentException("Invalid cipher suite name: " + name);
+  }
+  
+  /**
+   * Convert a name to a CipherSuite. Since {@link #algoBlockSize} is fixed
+   * for a given cipher suite, only the name needs to be compared.
+   * @param name cipher suite name
+   * @return CipherSuite cipher suite
+   */
+  public static CipherSuite convert(String name) {
+    CipherSuite[] suites = CipherSuite.values();
+    for (CipherSuite suite : suites) {
+      if (suite.getName().equals(name)) {
+        return suite;
+      }
+    }
+    throw new IllegalArgumentException("Invalid cipher suite name: " + name);
+  }
+  
+  /**
+   * Returns suffix of cipher suite configuration.
+   * @return String configuration suffix
+   */
+  public String getConfigSuffix() {
+    String[] parts = name.split("/");
+    StringBuilder suffix = new StringBuilder();
+    for (String part : parts) {
+      suffix.append(".").append(part.toLowerCase());
+    }
+    
+    return suffix.toString();
+  }
+}
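
A brief usage sketch for the enum above (illustrative only; it simply calls
the methods added in this file): converting a suite name and deriving the
configuration suffix used when looking up codec classes.

    import org.apache.hadoop.crypto.CipherSuite;

    public class CipherSuiteSketch {
      public static void main(String[] args) {
        CipherSuite suite = CipherSuite.convert("AES/CTR/NoPadding");
        System.out.println(suite.getAlgorithmBlockSize()); // 16 (bytes)
        System.out.println(suite.getConfigSuffix());       // ".aes.ctr.nopadding"
      }
    }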
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java
new file mode 100644
index 0000000..9de7f95
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.security.GeneralSecurityException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Splitter;
+import com.google.common.collect.Lists;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT;
+
+/**
+ * Crypto codec class, encapsulates encryptor/decryptor pair.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class CryptoCodec implements Configurable {
+  public static Logger LOG = LoggerFactory.getLogger(CryptoCodec.class);
+  
+  /**
+   * Get crypto codec for specified algorithm/mode/padding.
+   * 
+   * @param conf
+   *          the configuration
+   * @param cipherSuite
+   *          algorithm/mode/padding
+   * @return CryptoCodec the codec object. Null will be returned if no crypto
+   *         codec classes are configured for the cipher suite.
+   */
+  public static CryptoCodec getInstance(Configuration conf, 
+      CipherSuite cipherSuite) {
+    List<Class<? extends CryptoCodec>> klasses = getCodecClasses(
+        conf, cipherSuite);
+    if (klasses == null) {
+      return null;
+    }
+    CryptoCodec codec = null;
+    for (Class<? extends CryptoCodec> klass : klasses) {
+      try {
+        CryptoCodec c = ReflectionUtils.newInstance(klass, conf);
+        if (c.getCipherSuite().getName().equals(cipherSuite.getName())) {
+          if (codec == null) {
+            LOG.debug("Using crypto codec {}.", klass.getName());
+            codec = c;
+          }
+        } else {
+          LOG.warn("Crypto codec {} doesn't meet the cipher suite {}.", 
+              klass.getName(), cipherSuite.getName());
+        }
+      } catch (Exception e) {
+        LOG.warn("Crypto codec {} is not available.", klass.getName());
+      }
+    }
+    
+    if (codec != null) {
+      return codec;
+    }
+    
+    throw new RuntimeException("No available crypto codec which meets " + 
+        "the cipher suite " + cipherSuite.getName() + ".");
+  }
+  
+  /**
+   * Get crypto codec for algorithm/mode/padding in config value
+   * hadoop.security.crypto.cipher.suite
+   * 
+   * @param conf
+   *          the configuration
+   * @return CryptoCodec the codec object. Null will be returned if no crypto
+   *         codec classes are configured for the cipher suite.
+   */
+  public static CryptoCodec getInstance(Configuration conf) {
+    String name = conf.get(HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY, 
+        HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT);
+    return getInstance(conf, CipherSuite.convert(name));
+  }
+  
+  private static List<Class<? extends CryptoCodec>> getCodecClasses(
+      Configuration conf, CipherSuite cipherSuite) {
+    List<Class<? extends CryptoCodec>> result = Lists.newArrayList();
+    String configName = HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX + 
+        cipherSuite.getConfigSuffix();
+    String codecString = conf.get(configName);
+    if (codecString == null) {
+      LOG.warn("No crypto codec classes with cipher suite configured.");
+      return null;
+    }
+    for (String c : Splitter.on(',').trimResults().omitEmptyStrings().
+        split(codecString)) {
+      try {
+        Class<?> cls = conf.getClassByName(c);
+        result.add(cls.asSubclass(CryptoCodec.class));
+      } catch (ClassCastException e) {
+        LOG.warn("Class " + c + " is not a CryptoCodec.");
+      } catch (ClassNotFoundException e) {
+        LOG.warn("Crypto codec " + c + " not found.");
+      }
+    }
+    
+    return result;
+  }
+
+  /**
+   * @return the CipherSuite for this codec.
+   */
+  public abstract CipherSuite getCipherSuite();
+
+  /**
+   * Create a {@link org.apache.hadoop.crypto.Encryptor}. 
+   * @return Encryptor the encryptor
+   */
+  public abstract Encryptor createEncryptor() throws GeneralSecurityException;
+  
+  /**
+   * Create a {@link org.apache.hadoop.crypto.Decryptor}.
+   * @return Decryptor the decryptor
+   */
+  public abstract Decryptor createDecryptor() throws GeneralSecurityException;
+  
+  /**
+   * This interface is only for Counter (CTR) mode. Generally the Encryptor
+   * or Decryptor calculates the IV and maintains the encryption context
+   * internally. For example, a {@link javax.crypto.Cipher} will maintain its
+   * encryption context internally when we do encryption/decryption using the
+   * Cipher#update interface.
+   * <p/>
+   * Encryption/Decryption is not always on the entire file. For example,
+   * in Hadoop, a node may only decrypt a portion of a file (i.e. a split).
+   * In these situations, the counter is derived from the file position.
+   * <p/>
+   * The IV can be calculated by combining the initial IV and the counter with 
+   * a lossless operation (concatenation, addition, or XOR).
+   * @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_.28CTR.29
+   * 
+   * @param initIV initial IV
+   * @param counter counter for input stream position 
+   * @param IV the IV for input stream position
+   */
+  public abstract void calculateIV(byte[] initIV, long counter, byte[] IV);
+  
+  /**
+   * Generate a number of secure, random bytes suitable for cryptographic use.
+   * This method needs to be thread-safe.
+   *
+   * @param bytes byte array to populate with random data
+   */
+  public abstract void generateSecureRandom(byte[] bytes);
+}
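
A usage sketch for the factory methods above. This is not part of the patch
and assumes that a concrete codec implementation is configured under the
HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX keys for the suite; otherwise
getInstance may return null or throw, as documented above.

    import java.security.GeneralSecurityException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.crypto.CryptoCodec;
    import org.apache.hadoop.crypto.Decryptor;
    import org.apache.hadoop.crypto.Encryptor;

    public class CryptoCodecSketch {
      public static void main(String[] args) throws GeneralSecurityException {
        Configuration conf = new Configuration();
        // Request the AES/CTR/NoPadding suite; the concrete class returned
        // depends on the codec classes configured for that suite's suffix.
        CryptoCodec codec =
            CryptoCodec.getInstance(conf, CipherSuite.AES_CTR_NOPADDING);
        byte[] key = new byte[16];   // 128-bit AES key
        byte[] iv = new byte[16];    // one AES block
        codec.generateSecureRandom(key);
        codec.generateSecureRandom(iv);
        Encryptor encryptor = codec.createEncryptor();
        Decryptor decryptor = codec.createDecryptor();
        // The crypto streams below wrap these Encryptor/Decryptor instances.
      }
    }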
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
new file mode 100644
index 0000000..e8964ed
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
@@ -0,0 +1,680 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.FileDescriptor;
+import java.io.FileInputStream;
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.security.GeneralSecurityException;
+import java.util.EnumSet;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ByteBufferReadable;
+import org.apache.hadoop.fs.CanSetDropBehind;
+import org.apache.hadoop.fs.CanSetReadahead;
+import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
+import org.apache.hadoop.fs.HasFileDescriptor;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.ReadOption;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.io.ByteBufferPool;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * CryptoInputStream decrypts data. It is not thread-safe. AES CTR mode is
+ * required in order to ensure that the plain text and cipher text have a 1:1
+ * mapping. The decryption is buffer based. The key points of the decryption
+ * are (1) calculating the counter and (2) padding through stream position:
+ * <p/>
+ * counter = base + pos/(algorithm blocksize); 
+ * padding = pos%(algorithm blocksize); 
+ * <p/>
+ * The underlying stream offset is maintained as state.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class CryptoInputStream extends FilterInputStream implements 
+    Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, 
+    CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess {
+  private static final byte[] oneByteBuf = new byte[1];
+  private final CryptoCodec codec;
+  private final Decryptor decryptor;
+  private final int bufferSize;
+  
+  /**
+   * Input data buffer. The data starts at inBuffer.position() and ends at
+   * inBuffer.limit().
+   */
+  private ByteBuffer inBuffer;
+  
+  /**
+   * The decrypted data buffer. The data starts at outBuffer.position() and 
+   * ends at outBuffer.limit();
+   */
+  private ByteBuffer outBuffer;
+  private long streamOffset = 0; // Underlying stream offset.
+  
+  /**
+   * Whether the underlying stream supports 
+   * {@link org.apache.hadoop.fs.ByteBufferReadable}
+   */
+  private Boolean usingByteBufferRead = null;
+  
+  /**
+   * Padding = pos%(algorithm blocksize); Padding is put into {@link #inBuffer} 
+   * before any other data goes in. The purpose of padding is to put the input 
+   * data at the proper position.
+   */
+  private byte padding;
+  private boolean closed;
+  private final byte[] key;
+  private final byte[] initIV;
+  private byte[] iv;
+  
+  /** DirectBuffer pool */
+  private final Queue<ByteBuffer> bufferPool = 
+      new ConcurrentLinkedQueue<ByteBuffer>();
+  /** Decryptor pool */
+  private final Queue<Decryptor> decryptorPool = 
+      new ConcurrentLinkedQueue<Decryptor>();
+  
+  public CryptoInputStream(InputStream in, CryptoCodec codec, 
+      int bufferSize, byte[] key, byte[] iv) throws IOException {
+    this(in, codec, bufferSize, key, iv, 
+        CryptoStreamUtils.getInputStreamOffset(in));
+  }
+  
+  public CryptoInputStream(InputStream in, CryptoCodec codec,
+      int bufferSize, byte[] key, byte[] iv, long streamOffset) throws IOException {
+    super(in);
+    this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize);
+    this.codec = codec;
+    this.key = key.clone();
+    this.initIV = iv.clone();
+    this.iv = iv.clone();
+    this.streamOffset = streamOffset;
+    inBuffer = ByteBuffer.allocateDirect(this.bufferSize);
+    outBuffer = ByteBuffer.allocateDirect(this.bufferSize);
+    decryptor = getDecryptor();
+    resetStreamOffset(streamOffset);
+  }
+  
+  public CryptoInputStream(InputStream in, CryptoCodec codec,
+      byte[] key, byte[] iv) throws IOException {
+    this(in, codec, CryptoStreamUtils.getBufferSize(codec.getConf()), key, iv);
+  }
+  
+  public InputStream getWrappedStream() {
+    return in;
+  }
+  
+  /**
+   * Decryption is buffer based.
+   * If there is data in {@link #outBuffer}, then read it out of this buffer.
+   * If there is no data in {@link #outBuffer}, then read more from the 
+   * underlying stream and do the decryption.
+   * @param b the buffer into which the decrypted data is read.
+   * @param off the buffer offset.
+   * @param len the maximum number of decrypted data bytes to read.
+   * @return int the total number of decrypted data bytes read into the buffer.
+   * @throws IOException
+   */
+  @Override
+  public int read(byte[] b, int off, int len) throws IOException {
+    checkStream();
+    if (b == null) {
+      throw new NullPointerException();
+    } else if (off < 0 || len < 0 || len > b.length - off) {
+      throw new IndexOutOfBoundsException();
+    } else if (len == 0) {
+      return 0;
+    }
+    
+    final int remaining = outBuffer.remaining();
+    if (remaining > 0) {
+      int n = Math.min(len, remaining);
+      outBuffer.get(b, off, n);
+      return n;
+    } else {
+      int n = 0;
+      
+      /*
+       * Check whether the underlying stream is {@link ByteBufferReadable};
+       * if it is, we can avoid an extra byte-array copy.
+       */
+      if (usingByteBufferRead == null) {
+        if (in instanceof ByteBufferReadable) {
+          try {
+            n = ((ByteBufferReadable) in).read(inBuffer);
+            usingByteBufferRead = Boolean.TRUE;
+          } catch (UnsupportedOperationException e) {
+            usingByteBufferRead = Boolean.FALSE;
+          }
+        } else {
+          usingByteBufferRead = Boolean.FALSE;
+        }
+        if (!usingByteBufferRead) {
+          n = readFromUnderlyingStream(inBuffer);
+        }
+      } else {
+        if (usingByteBufferRead) {
+          n = ((ByteBufferReadable) in).read(inBuffer);
+        } else {
+          n = readFromUnderlyingStream(inBuffer);
+        }
+      }
+      if (n <= 0) {
+        return n;
+      }
+      
+      streamOffset += n; // Read n bytes
+      decrypt(decryptor, inBuffer, outBuffer, padding);
+      padding = afterDecryption(decryptor, inBuffer, streamOffset, iv);
+      n = Math.min(len, outBuffer.remaining());
+      outBuffer.get(b, off, n);
+      return n;
+    }
+  }
+  
+  /** Read data from underlying stream. */
+  private int readFromUnderlyingStream(ByteBuffer inBuffer) throws IOException {
+    final int toRead = inBuffer.remaining();
+    final byte[] tmp = getTmpBuf();
+    final int n = in.read(tmp, 0, toRead);
+    if (n > 0) {
+      inBuffer.put(tmp, 0, n);
+    }
+    return n;
+  }
+  
+  private byte[] tmpBuf;
+  private byte[] getTmpBuf() {
+    if (tmpBuf == null) {
+      tmpBuf = new byte[bufferSize];
+    }
+    return tmpBuf;
+  }
+  
+  /**
+   * Do the decryption using inBuffer as input and outBuffer as output.
+   * Upon return, inBuffer is cleared; the decrypted data starts at 
+   * outBuffer.position() and ends at outBuffer.limit();
+   */
+  private void decrypt(Decryptor decryptor, ByteBuffer inBuffer, 
+      ByteBuffer outBuffer, byte padding) throws IOException {
+    Preconditions.checkState(inBuffer.position() >= padding);
+    if(inBuffer.position() == padding) {
+      // There is no real data in inBuffer.
+      return;
+    }
+    inBuffer.flip();
+    outBuffer.clear();
+    decryptor.decrypt(inBuffer, outBuffer);
+    inBuffer.clear();
+    outBuffer.flip();
+    if (padding > 0) {
+      /*
+       * The plain text and cipher text have a 1:1 mapping, they start at the 
+       * same position.
+       */
+      outBuffer.position(padding);
+    }
+  }
+  
+  /**
+   * This method is executed immediately after decryption. Check whether 
+   * decryptor should be updated and recalculate padding if needed. 
+   */
+  private byte afterDecryption(Decryptor decryptor, ByteBuffer inBuffer, 
+      long position, byte[] iv) throws IOException {
+    byte padding = 0;
+    if (decryptor.isContextReset()) {
+      /*
+       * This code is generally not executed since the decryptor usually 
+       * maintains decryption context (e.g. the counter) internally. However, 
+       * some implementations can't maintain context so a re-init is necessary 
+       * after each decryption call.
+       */
+      updateDecryptor(decryptor, position, iv);
+      padding = getPadding(position);
+      inBuffer.position(padding);
+    }
+    return padding;
+  }
+  
+  private long getCounter(long position) {
+    return position / codec.getCipherSuite().getAlgorithmBlockSize();
+  }
+  
+  private byte getPadding(long position) {
+    return (byte)(position % codec.getCipherSuite().getAlgorithmBlockSize());
+  }
+  
+  /** Calculate the counter and iv, update the decryptor. */
+  private void updateDecryptor(Decryptor decryptor, long position, byte[] iv) 
+      throws IOException {
+    final long counter = getCounter(position);
+    codec.calculateIV(initIV, counter, iv);
+    decryptor.init(key, iv);
+  }
+  
+  /**
+   * Reset the underlying stream offset; clear {@link #inBuffer} and 
+   * {@link #outBuffer}. This typically happens during {@link #seek(long)}
+   * or {@link #skip(long)}.
+   */
+  private void resetStreamOffset(long offset) throws IOException {
+    streamOffset = offset;
+    inBuffer.clear();
+    outBuffer.clear();
+    outBuffer.limit(0);
+    updateDecryptor(decryptor, offset, iv);
+    padding = getPadding(offset);
+    inBuffer.position(padding); // Set proper position for input data.
+  }
+  
+  @Override
+  public void close() throws IOException {
+    if (closed) {
+      return;
+    }
+    
+    super.close();
+    freeBuffers();
+    closed = true;
+  }
+  
+  /** Positioned read. It is thread-safe */
+  @Override
+  public int read(long position, byte[] buffer, int offset, int length)
+      throws IOException {
+    checkStream();
+    try {
+      final int n = ((PositionedReadable) in).read(position, buffer, offset, 
+          length);
+      if (n > 0) {
+        // This operation does not change the current offset of the file
+        decrypt(position, buffer, offset, n);
+      }
+      
+      return n;
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " +
+          "positioned read.");
+    }
+  }
+  
+  /**
+   * Decrypt length bytes in buffer starting at offset. Output is also put 
+   * into buffer starting at offset. It is thread-safe.
+   */
+  private void decrypt(long position, byte[] buffer, int offset, int length) 
+      throws IOException {
+    ByteBuffer inBuffer = getBuffer();
+    ByteBuffer outBuffer = getBuffer();
+    Decryptor decryptor = null;
+    try {
+      decryptor = getDecryptor();
+      byte[] iv = initIV.clone();
+      updateDecryptor(decryptor, position, iv);
+      byte padding = getPadding(position);
+      inBuffer.position(padding); // Set proper position for input data.
+      
+      int n = 0;
+      while (n < length) {
+        int toDecrypt = Math.min(length - n, inBuffer.remaining());
+        inBuffer.put(buffer, offset + n, toDecrypt);
+        // Do decryption
+        decrypt(decryptor, inBuffer, outBuffer, padding);
+        
+        outBuffer.get(buffer, offset + n, toDecrypt);
+        n += toDecrypt;
+        padding = afterDecryption(decryptor, inBuffer, position + n, iv);
+      }
+    } finally {
+      returnBuffer(inBuffer);
+      returnBuffer(outBuffer);
+      returnDecryptor(decryptor);
+    }
+  }
+  
+  /** Positioned read fully. It is thread-safe */
+  @Override
+  public void readFully(long position, byte[] buffer, int offset, int length)
+      throws IOException {
+    checkStream();
+    try {
+      ((PositionedReadable) in).readFully(position, buffer, offset, length);
+      if (length > 0) {
+        // This operation does not change the current offset of the file
+        decrypt(position, buffer, offset, length);
+      }
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " +
+          "positioned readFully.");
+    }
+  }
+
+  @Override
+  public void readFully(long position, byte[] buffer) throws IOException {
+    readFully(position, buffer, 0, buffer.length);
+  }
+
+  /** Seek to a position. */
+  @Override
+  public void seek(long pos) throws IOException {
+    Preconditions.checkArgument(pos >= 0, "Cannot seek to negative offset.");
+    checkStream();
+    try {
+      /*
+       * If data of target pos in the underlying stream has already been read 
+       * and decrypted in outBuffer, we just need to re-position outBuffer.
+       */
+      if (pos <= streamOffset && pos >= (streamOffset - outBuffer.remaining())) {
+        int forward = (int) (pos - (streamOffset - outBuffer.remaining()));
+        if (forward > 0) {
+          outBuffer.position(outBuffer.position() + forward);
+        }
+      } else {
+        ((Seekable) in).seek(pos);
+        resetStreamOffset(pos);
+      }
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " +
+          "seek.");
+    }
+  }
+  
+  /** Skip n bytes */
+  @Override
+  public long skip(long n) throws IOException {
+    Preconditions.checkArgument(n >= 0, "Negative skip length.");
+    checkStream();
+    
+    if (n == 0) {
+      return 0;
+    } else if (n <= outBuffer.remaining()) {
+      int pos = outBuffer.position() + (int) n;
+      outBuffer.position(pos);
+      return n;
+    } else {
+      /*
+       * Subtract outBuffer.remaining() to see how many bytes we need to 
+       * skip in the underlying stream. Add outBuffer.remaining() to the 
+       * actual number of skipped bytes in the underlying stream to get the 
+       * number of skipped bytes from the user's point of view.
+       */
+      n -= outBuffer.remaining();
+      long skipped = in.skip(n);
+      if (skipped < 0) {
+        skipped = 0;
+      }
+      long pos = streamOffset + skipped;
+      skipped += outBuffer.remaining();
+      resetStreamOffset(pos);
+      return skipped;
+    }
+  }
+
+  /** Get underlying stream position. */
+  @Override
+  public long getPos() throws IOException {
+    checkStream();
+    // Equals: ((Seekable) in).getPos() - outBuffer.remaining()
+    return streamOffset - outBuffer.remaining();
+  }
+  
+  /** ByteBuffer read. */
+  @Override
+  public int read(ByteBuffer buf) throws IOException {
+    checkStream();
+    if (in instanceof ByteBufferReadable) {
+      final int unread = outBuffer.remaining();
+      if (unread > 0) { // Have unread decrypted data in buffer.
+        int toRead = buf.remaining();
+        if (toRead <= unread) {
+          final int limit = outBuffer.limit();
+          outBuffer.limit(outBuffer.position() + toRead);
+          buf.put(outBuffer);
+          outBuffer.limit(limit);
+          return toRead;
+        } else {
+          buf.put(outBuffer);
+        }
+      }
+      
+      final int pos = buf.position();
+      final int n = ((ByteBufferReadable) in).read(buf);
+      if (n > 0) {
+        streamOffset += n; // Read n bytes
+        decrypt(buf, n, pos);
+      }
+      return n;
+    }
+
+    throw new UnsupportedOperationException("ByteBuffer read unsupported " +
+        "by input stream.");
+  }
+  
+  /**
+   * Decrypt all data in buf: a total of n bytes from the given start position.
+   * The output is written back into buf at the same start position.
+   * buf.position() and buf.limit() should be unchanged after decryption.
+   */
+  private void decrypt(ByteBuffer buf, int n, int start) 
+      throws IOException {
+    final int pos = buf.position();
+    final int limit = buf.limit();
+    int len = 0;
+    while (len < n) {
+      buf.position(start + len);
+      buf.limit(start + len + Math.min(n - len, inBuffer.remaining()));
+      inBuffer.put(buf);
+      // Do decryption
+      try {
+        decrypt(decryptor, inBuffer, outBuffer, padding);
+        buf.position(start + len);
+        buf.limit(limit);
+        len += outBuffer.remaining();
+        buf.put(outBuffer);
+      } finally {
+        padding = afterDecryption(decryptor, inBuffer, streamOffset - (n - len), iv);
+      }
+    }
+    buf.position(pos);
+  }
+  
+  @Override
+  public int available() throws IOException {
+    checkStream();
+    
+    return in.available() + outBuffer.remaining();
+  }
+
+  @Override
+  public boolean markSupported() {
+    return false;
+  }
+  
+  @Override
+  public void mark(int readLimit) {
+  }
+  
+  @Override
+  public void reset() throws IOException {
+    throw new IOException("Mark/reset not supported");
+  }
+
+  @Override
+  public boolean seekToNewSource(long targetPos) throws IOException {
+    Preconditions.checkArgument(targetPos >= 0, 
+        "Cannot seek to negative offset.");
+    checkStream();
+    try {
+      boolean result = ((Seekable) in).seekToNewSource(targetPos);
+      resetStreamOffset(targetPos);
+      return result;
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " +
+          "seekToNewSource.");
+    }
+  }
+
+  @Override
+  public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
+      EnumSet<ReadOption> opts) throws IOException,
+      UnsupportedOperationException {
+    checkStream();
+    try {
+      if (outBuffer.remaining() > 0) {
+        // Have some decrypted data unread, need to reset.
+        ((Seekable) in).seek(getPos());
+        resetStreamOffset(getPos());
+      }
+      final ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).
+          read(bufferPool, maxLength, opts);
+      if (buffer != null) {
+        final int n = buffer.remaining();
+        if (n > 0) {
+          streamOffset += buffer.remaining(); // Read n bytes
+          final int pos = buffer.position();
+          decrypt(buffer, n, pos);
+        }
+      }
+      return buffer;
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " + 
+          "enhanced byte buffer access.");
+    }
+  }
+
+  @Override
+  public void releaseBuffer(ByteBuffer buffer) {
+    try {
+      ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " + 
+          "release buffer.");
+    }
+  }
+
+  @Override
+  public void setReadahead(Long readahead) throws IOException,
+      UnsupportedOperationException {
+    try {
+      ((CanSetReadahead) in).setReadahead(readahead);
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " +
+          "setting the readahead caching strategy.");
+    }
+  }
+
+  @Override
+  public void setDropBehind(Boolean dropCache) throws IOException,
+      UnsupportedOperationException {
+    try {
+      ((CanSetDropBehind) in).setDropBehind(dropCache);
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not " +
+          "support setting the drop-behind caching setting.");
+    }
+  }
+
+  @Override
+  public FileDescriptor getFileDescriptor() throws IOException {
+    if (in instanceof HasFileDescriptor) {
+      return ((HasFileDescriptor) in).getFileDescriptor();
+    } else if (in instanceof FileInputStream) {
+      return ((FileInputStream) in).getFD();
+    } else {
+      return null;
+    }
+  }
+  
+  @Override
+  public int read() throws IOException {
+    return (read(oneByteBuf, 0, 1) == -1) ? -1 : (oneByteBuf[0] & 0xff);
+  }
+  
+  private void checkStream() throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
+  }
+  
+  /** Get direct buffer from pool */
+  private ByteBuffer getBuffer() {
+    ByteBuffer buffer = bufferPool.poll();
+    if (buffer == null) {
+      buffer = ByteBuffer.allocateDirect(bufferSize);
+    }
+    
+    return buffer;
+  }
+  
+  /** Return direct buffer to pool */
+  private void returnBuffer(ByteBuffer buf) {
+    if (buf != null) {
+      buf.clear();
+      bufferPool.add(buf);
+    }
+  }
+  
+  /** Forcibly free the direct buffers. */
+  private void freeBuffers() {
+    CryptoStreamUtils.freeDB(inBuffer);
+    CryptoStreamUtils.freeDB(outBuffer);
+    cleanBufferPool();
+  }
+  
+  /** Clean direct buffer pool */
+  private void cleanBufferPool() {
+    ByteBuffer buf;
+    while ((buf = bufferPool.poll()) != null) {
+      CryptoStreamUtils.freeDB(buf);
+    }
+  }
+  
+  /** Get decryptor from pool */
+  private Decryptor getDecryptor() throws IOException {
+    Decryptor decryptor = decryptorPool.poll();
+    if (decryptor == null) {
+      try {
+        decryptor = codec.createDecryptor();
+      } catch (GeneralSecurityException e) {
+        throw new IOException(e);
+      }
+    }
+    
+    return decryptor;
+  }
+  
+  /** Return decryptor to pool */
+  private void returnDecryptor(Decryptor decryptor) {
+    if (decryptor != null) {
+      decryptorPool.add(decryptor);
+    }
+  }
+}
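
A hedged usage sketch for the stream above (illustrative, not part of the
patch; the key and iv are placeholders that must match the values used when
the data was encrypted):

    import java.io.InputStream;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.CryptoCodec;
    import org.apache.hadoop.crypto.CryptoInputStream;

    public class DecryptingReaderSketch {
      // Wraps an encrypted stream for transparent decryption.
      public static InputStream wrap(InputStream encryptedIn, byte[] key,
          byte[] iv) throws Exception {
        Configuration conf = new Configuration();
        // May return null or throw if no codec is configured for the suite.
        CryptoCodec codec = CryptoCodec.getInstance(conf);
        // This constructor takes the buffer size from the configuration and
        // the start offset from the wrapped stream (0 if it is not Seekable).
        return new CryptoInputStream(encryptedIn, codec, key, iv);
      }
    }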
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
new file mode 100644
index 0000000..4f9f7f5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
@@ -0,0 +1,280 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.security.GeneralSecurityException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CanSetDropBehind;
+import org.apache.hadoop.fs.Syncable;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * CryptoOutputStream encrypts data. It is not thread-safe. AES CTR mode is
+ * required in order to ensure that the plain text and cipher text have a 1:1
+ * mapping. The encryption is buffer based. The key points of the encryption are
+ * (1) calculating counter and (2) padding through stream position.
+ * <p/>
+ * counter = base + pos/(algorithm blocksize); 
+ * padding = pos%(algorithm blocksize); 
+ * <p/>
+ * The underlying stream offset is maintained as state.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class CryptoOutputStream extends FilterOutputStream implements 
+    Syncable, CanSetDropBehind {
+  private static final byte[] oneByteBuf = new byte[1];
+  private final CryptoCodec codec;
+  private final Encryptor encryptor;
+  private final int bufferSize;
+  
+  /**
+   * Input data buffer. The data starts at inBuffer.position() and ends at 
+   * inBuffer.limit().
+   */
+  private ByteBuffer inBuffer;
+  
+  /**
+   * Encrypted data buffer. The data starts at outBuffer.position() and ends at 
+   * outBuffer.limit();
+   */
+  private ByteBuffer outBuffer;
+  private long streamOffset = 0; // Underlying stream offset.
+  
+  /**
+   * Padding = pos%(algorithm blocksize); Padding is put into {@link #inBuffer} 
+   * before any other data goes in. The purpose of padding is to put input data
+   * at proper position.
+   */
+  private byte padding;
+  private boolean closed;
+  private final byte[] key;
+  private final byte[] initIV;
+  private byte[] iv;
+  
+  public CryptoOutputStream(OutputStream out, CryptoCodec codec, 
+      int bufferSize, byte[] key, byte[] iv) throws IOException {
+    this(out, codec, bufferSize, key, iv, 0);
+  }
+  
+  public CryptoOutputStream(OutputStream out, CryptoCodec codec, 
+      int bufferSize, byte[] key, byte[] iv, long streamOffset) 
+      throws IOException {
+    super(out);
+    this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize);
+    this.codec = codec;
+    this.key = key.clone();
+    this.initIV = iv.clone();
+    this.iv = iv.clone();
+    inBuffer = ByteBuffer.allocateDirect(this.bufferSize);
+    outBuffer = ByteBuffer.allocateDirect(this.bufferSize);
+    this.streamOffset = streamOffset;
+    try {
+      encryptor = codec.createEncryptor();
+    } catch (GeneralSecurityException e) {
+      throw new IOException(e);
+    }
+    updateEncryptor();
+  }
+  
+  public CryptoOutputStream(OutputStream out, CryptoCodec codec, 
+      byte[] key, byte[] iv) throws IOException {
+    this(out, codec, key, iv, 0);
+  }
+  
+  public CryptoOutputStream(OutputStream out, CryptoCodec codec, 
+      byte[] key, byte[] iv, long streamOffset) throws IOException {
+    this(out, codec, CryptoStreamUtils.getBufferSize(codec.getConf()), 
+        key, iv, streamOffset);
+  }
+  
+  public OutputStream getWrappedStream() {
+    return out;
+  }
+  
+  /**
+   * Encryption is buffer based.
+   * If there is enough room in {@link #inBuffer}, then write to this buffer.
+   * If {@link #inBuffer} is full, then do encryption and write data to the
+   * underlying stream.
+   * @param b the data.
+   * @param off the start offset in the data.
+   * @param len the number of bytes to write.
+   * @throws IOException
+   */
+  @Override
+  public void write(byte[] b, int off, int len) throws IOException {
+    checkStream();
+    if (b == null) {
+      throw new NullPointerException();
+    } else if (off < 0 || len < 0 || off > b.length || 
+        len > b.length - off) {
+      throw new IndexOutOfBoundsException();
+    }
+    while (len > 0) {
+      final int remaining = inBuffer.remaining();
+      if (len < remaining) {
+        inBuffer.put(b, off, len);
+        len = 0;
+      } else {
+        inBuffer.put(b, off, remaining);
+        off += remaining;
+        len -= remaining;
+        encrypt();
+      }
+    }
+  }
+  
+  /**
+   * Do the encryption, input is {@link #inBuffer} and output is 
+   * {@link #outBuffer}.
+   */
+  private void encrypt() throws IOException {
+    Preconditions.checkState(inBuffer.position() >= padding);
+    if (inBuffer.position() == padding) {
+      // There is no real data in the inBuffer.
+      return;
+    }
+    inBuffer.flip();
+    outBuffer.clear();
+    encryptor.encrypt(inBuffer, outBuffer);
+    inBuffer.clear();
+    outBuffer.flip();
+    if (padding > 0) {
+      /*
+       * The plain text and cipher text have a 1:1 mapping, they start at the 
+       * same position.
+       */
+      outBuffer.position(padding);
+      padding = 0;
+    }
+    final int len = outBuffer.remaining();
+    
+    /*
+     * If the underlying stream supports {@link ByteBuffer} writes in the
+     * future, this copy through a temporary byte array should be refined.
+     */
+    final byte[] tmp = getTmpBuf();
+    outBuffer.get(tmp, 0, len);
+    out.write(tmp, 0, len);
+    
+    streamOffset += len;
+    if (encryptor.isContextReset()) {
+      /*
+       * This code is generally not executed since the encryptor usually
+       * maintains encryption context (e.g. the counter) internally. However,
+       * some implementations can't maintain context so a re-init is necessary
+       * after each encryption call.
+       */
+      updateEncryptor();
+    }
+  }
+  
+  /** Update the {@link #encryptor}: calculate counter and {@link #padding}. */
+  private void updateEncryptor() throws IOException {
+    final long counter =
+        streamOffset / codec.getCipherSuite().getAlgorithmBlockSize();
+    padding =
+        (byte)(streamOffset % codec.getCipherSuite().getAlgorithmBlockSize());
+    inBuffer.position(padding); // Set proper position for input data.
+    codec.calculateIV(initIV, counter, iv);
+    encryptor.init(key, iv);
+  }
+  
+  private byte[] tmpBuf;
+  private byte[] getTmpBuf() {
+    if (tmpBuf == null) {
+      tmpBuf = new byte[bufferSize];
+    }
+    return tmpBuf;
+  }
+  
+  @Override
+  public void close() throws IOException {
+    if (closed) {
+      return;
+    }
+    
+    super.close();
+    freeBuffers();
+    closed = true;
+  }
+  
+  /**
+   * To flush, we need to encrypt the data in the buffer and write to the 
+   * underlying stream, then do the flush.
+   */
+  @Override
+  public void flush() throws IOException {
+    checkStream();
+    encrypt();
+    super.flush();
+  }
+  
+  @Override
+  public void write(int b) throws IOException {
+    oneByteBuf[0] = (byte)(b & 0xff);
+    write(oneByteBuf, 0, oneByteBuf.length);
+  }
+  
+  private void checkStream() throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
+  }
+  
+  @Override
+  public void setDropBehind(Boolean dropCache) throws IOException,
+      UnsupportedOperationException {
+    try {
+      ((CanSetDropBehind) out).setDropBehind(dropCache);
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not " +
+          "support setting the drop-behind caching.");
+    }
+  }
+
+  @Override
+  public void hflush() throws IOException {
+    flush();
+    if (out instanceof Syncable) {
+      ((Syncable)out).hflush();
+    }
+  }
+
+  @Override
+  public void hsync() throws IOException {
+    flush();
+    if (out instanceof Syncable) {
+      ((Syncable)out).hsync();
+    }
+  }
+  
+  /** Forcibly free the direct buffers. */
+  private void freeBuffers() {
+    CryptoStreamUtils.freeDB(inBuffer);
+    CryptoStreamUtils.freeDB(outBuffer);
+  }
+}
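
A worked example of the counter and padding calculation in updateEncryptor(),
assuming the 16-byte AES block size; the class name and values below are
illustrative only:

    public class CtrOffsetExample {
      public static void main(String[] args) {
        final int blockSize = 16;                  // AES algorithm block size
        final long streamOffset = 100;             // bytes already written
        long counter = streamOffset / blockSize;   // 6: resume inside block index 6
        byte padding = (byte) (streamOffset % blockSize);
        // padding is 4: the first 4 bytes produced for the partial block are
        // dropped so the cipher stream stays aligned with the block boundary.
        System.out.println("counter=" + counter + ", padding=" + padding);
      }
    }
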
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java
new file mode 100644
index 0000000..820d775
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Seekable;
+
+import com.google.common.base.Preconditions;
+
+@InterfaceAudience.Private
+public class CryptoStreamUtils {
+  private static final int MIN_BUFFER_SIZE = 512;
+  
+  /** Forcibly free the direct buffer. */
+  public static void freeDB(ByteBuffer buffer) {
+    if (buffer instanceof sun.nio.ch.DirectBuffer) {
+      final sun.misc.Cleaner bufferCleaner =
+          ((sun.nio.ch.DirectBuffer) buffer).cleaner();
+      bufferCleaner.clean();
+    }
+  }
+  
+  /** Read the crypto buffer size from the configuration. */
+  public static int getBufferSize(Configuration conf) {
+    return conf.getInt(HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY, 
+        HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT);
+  }
+  
+  /** Check the buffer size and floor it to a multiple of the block size. */
+  public static int checkBufferSize(CryptoCodec codec, int bufferSize) {
+    Preconditions.checkArgument(bufferSize >= MIN_BUFFER_SIZE, 
+        "Minimum value of buffer size is " + MIN_BUFFER_SIZE + ".");
+    return bufferSize - bufferSize % codec.getCipherSuite()
+        .getAlgorithmBlockSize();
+  }
+  
+  /**
+   * If the input stream is {@link org.apache.hadoop.fs.Seekable}, return its
+   * current position; otherwise return 0.
+   */
+  public static long getInputStreamOffset(InputStream in) throws IOException {
+    if (in instanceof Seekable) {
+      return ((Seekable) in).getPos();
+    }
+    return 0;
+  }
+}
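
The flooring in checkBufferSize keeps crypto buffers block-aligned. A small
worked example of the same arithmetic, assuming a 16-byte block size
(illustrative class name):

    public class BufferSizeExample {
      public static void main(String[] args) {
        final int blockSize = 16;     // AES algorithm block size
        int requested = 8193;         // e.g. a configured buffer size
        int floored = requested - requested % blockSize;
        System.out.println(floored);  // 8192: largest block-aligned size <= 8193
      }
    }
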
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java
new file mode 100644
index 0000000..9958415
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface Decryptor {
+  
+  /**
+   * Initialize the decryptor and the internal decryption context.
+   *
+   * @param key decryption key.
+   * @param iv decryption initialization vector
+   * @throws IOException if initialization fails
+   */
+  public void init(byte[] key, byte[] iv) throws IOException;
+  
+  /**
+   * Indicate whether the decryption context is reset.
+   * <p/>
+   * Certain modes, like CTR, require a different IV depending on the 
+   * position in the stream. Generally, the decryptor maintains any necessary
+   * context for calculating the IV and counter so that no reinit is necessary 
+   * during the decryption. Reinit before each operation is inefficient.
+   * @return boolean whether context is reset.
+   */
+  public boolean isContextReset();
+  
+  /**
+   * This presents a direct interface decrypting with direct ByteBuffers.
+   * <p/>
+   * This method does not always decrypt the entire buffer and may need to be
+   * called multiple times to process an entire buffer. The object
+   * may hold the decryption context internally.
+   * <p/>
+   * Some implementations may require sufficient space in the destination 
+   * buffer to decrypt the entire input buffer.
+   * <p/>
+   * Upon return, inBuffer.position() will be advanced by the number of bytes
+   * read and outBuffer.position() by bytes written. Implementations should 
+   * not modify inBuffer.limit() and outBuffer.limit().
+   * <p/>
+   * @param inBuffer a direct {@link ByteBuffer} to read from. inBuffer may 
+   * not be null and inBuffer.remaining() must be > 0
+   * @param outBuffer a direct {@link ByteBuffer} to write to. outBuffer may 
+   * not be null and outBuffer.remaining() must be > 0
+   * @throws IOException if decryption fails
+   */
+  public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) 
+      throws IOException;
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java
new file mode 100644
index 0000000..6dc3cfb
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface Encryptor {
+  
+  /**
+   * Initialize the encryptor and the internal encryption context.
+   * @param key encryption key.
+   * @param iv encryption initialization vector
+   * @throws IOException if initialization fails
+   */
+  public void init(byte[] key, byte[] iv) throws IOException;
+  
+  /**
+   * Indicate whether the encryption context is reset.
+   * <p/>
+   * Certain modes, like CTR, require a different IV depending on the
+   * position in the stream. Generally, the encryptor maintains any necessary
+   * context for calculating the IV and counter so that no reinit is necessary
+   * during the encryption. Reinit before each operation is inefficient. 
+   * @return boolean whether context is reset.
+   */
+  public boolean isContextReset();
+  
+  /**
+   * This presents a direct interface encrypting with direct ByteBuffers.
+   * <p/>
+   * This method does not always encrypt the entire buffer and may need to be
+   * called multiple times to process an entire buffer. The object
+   * may hold the encryption context internally.
+   * <p/>
+   * Some implementations may require sufficient space in the destination 
+   * buffer to encrypt the entire input buffer.
+   * <p/>
+   * Upon return, inBuffer.position() will be advanced by the number of bytes
+   * read and outBuffer.position() by bytes written. Implementations should
+   * not modify inBuffer.limit() and outBuffer.limit().
+   * <p/>
+   * @param inBuffer a direct {@link ByteBuffer} to read from. inBuffer may 
+   * not be null and inBuffer.remaining() must be > 0
+   * @param outBuffer a direct {@link ByteBuffer} to write to. outBuffer may 
+   * not be null and outBuffer.remaining() must be > 0
+   * @throws IOException if encryption fails
+   */
+  public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) 
+      throws IOException;
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java
new file mode 100644
index 0000000..61ee743
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.security.GeneralSecurityException;
+import java.security.SecureRandom;
+
+import javax.crypto.Cipher;
+import javax.crypto.spec.IvParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+
+import com.google.common.base.Preconditions;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT;
+
+/**
+ * Implement the AES-CTR crypto codec using JCE provider.
+ */
+@InterfaceAudience.Private
+public class JceAesCtrCryptoCodec extends AesCtrCryptoCodec {
+  private static final Log LOG =
+      LogFactory.getLog(JceAesCtrCryptoCodec.class.getName());
+  
+  private Configuration conf;
+  private String provider;
+  private SecureRandom random;
+
+  public JceAesCtrCryptoCodec() {
+  }
+  
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+  
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+    provider = conf.get(HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY);
+    final String secureRandomAlg = conf.get(
+        HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY, 
+        HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT);
+    try {
+      random = (provider != null) ? 
+          SecureRandom.getInstance(secureRandomAlg, provider) : 
+            SecureRandom.getInstance(secureRandomAlg);
+    } catch (GeneralSecurityException e) {
+      LOG.warn(e.getMessage());
+      random = new SecureRandom();
+    }
+  }
+
+  @Override
+  public Encryptor createEncryptor() throws GeneralSecurityException {
+    return new JceAesCtrCipher(Cipher.ENCRYPT_MODE, provider);
+  }
+
+  @Override
+  public Decryptor createDecryptor() throws GeneralSecurityException {
+    return new JceAesCtrCipher(Cipher.DECRYPT_MODE, provider);
+  }
+  
+  @Override
+  public void generateSecureRandom(byte[] bytes) {
+    random.nextBytes(bytes);
+  }  
+  
+  private static class JceAesCtrCipher implements Encryptor, Decryptor {
+    private final Cipher cipher;
+    private final int mode;
+    private boolean contextReset = false;
+    
+    public JceAesCtrCipher(int mode, String provider) 
+        throws GeneralSecurityException {
+      this.mode = mode;
+      if (provider == null || provider.isEmpty()) {
+        cipher = Cipher.getInstance(SUITE.getName());
+      } else {
+        cipher = Cipher.getInstance(SUITE.getName(), provider);
+      }
+    }
+
+    @Override
+    public void init(byte[] key, byte[] iv) throws IOException {
+      Preconditions.checkNotNull(key);
+      Preconditions.checkNotNull(iv);
+      contextReset = false;
+      try {
+        cipher.init(mode, new SecretKeySpec(key, "AES"), 
+            new IvParameterSpec(iv));
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+    }
+
+    /**
+     * AES-CTR will consume all of the input data. It requires enough space in 
+     * the destination buffer to encrypt the entire input buffer.
+     */
+    @Override
+    public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
+        throws IOException {
+      process(inBuffer, outBuffer);
+    }
+    
+    /**
+     * AES-CTR will consume all of the input data. It requires enough space in
+     * the destination buffer to decrypt the entire input buffer.
+     */
+    @Override
+    public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
+        throws IOException {
+      process(inBuffer, outBuffer);
+    }
+    
+    private void process(ByteBuffer inBuffer, ByteBuffer outBuffer)
+        throws IOException {
+      try {
+        int inputSize = inBuffer.remaining();
+        // Cipher#update will maintain crypto context.
+        int n = cipher.update(inBuffer, outBuffer);
+        if (n < inputSize) {
+          /**
+           * Typically code will not get here. Cipher#update will consume all 
+           * input data and put result in outBuffer. 
+           * Cipher#doFinal will reset the crypto context.
+           */
+          contextReset = true;
+          cipher.doFinal(inBuffer, outBuffer);
+        }
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+    }
+    
+    @Override
+    public boolean isContextReset() {
+      return contextReset;
+    }
+  }
+}
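
Together with the Encryptor and Decryptor contracts above, the JCE codec can
be exercised end to end. A minimal round-trip sketch; the all-zero key/IV and
the class name are illustrative only, and buffers are direct as the contract
requires:

    import java.nio.ByteBuffer;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.Decryptor;
    import org.apache.hadoop.crypto.Encryptor;
    import org.apache.hadoop.crypto.JceAesCtrCryptoCodec;

    public class JceRoundTripSketch {
      public static void main(String[] args) throws Exception {
        JceAesCtrCryptoCodec codec = new JceAesCtrCryptoCodec();
        codec.setConf(new Configuration());

        byte[] key = new byte[16];        // demo key (all zeros)
        byte[] iv = new byte[16];         // demo IV (all zeros)
        byte[] plain = "hello, crypto".getBytes("UTF-8");

        ByteBuffer in = ByteBuffer.allocateDirect(1024);
        ByteBuffer cipherText = ByteBuffer.allocateDirect(1024);
        ByteBuffer out = ByteBuffer.allocateDirect(1024);

        Encryptor enc = codec.createEncryptor();
        enc.init(key, iv);
        in.put(plain);
        in.flip();
        enc.encrypt(in, cipherText);      // consumes all remaining input
        cipherText.flip();

        Decryptor dec = codec.createDecryptor();
        dec.init(key, iv);                // same key/IV recovers the plain text
        dec.decrypt(cipherText, out);
        out.flip();

        byte[] recovered = new byte[out.remaining()];
        out.get(recovered);
        System.out.println(new String(recovered, "UTF-8"));
      }
    }
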
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
new file mode 100644
index 0000000..4ca79b3
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.security.GeneralSecurityException;
+import java.security.SecureRandom;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.crypto.random.OsSecureRandom;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/**
+ * Implement the AES-CTR crypto codec using JNI into OpenSSL.
+ */
+@InterfaceAudience.Private
+public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec {
+  private static final Log LOG =
+      LogFactory.getLog(OpensslAesCtrCryptoCodec.class.getName());
+
+  private Configuration conf;
+  private Random random;
+  
+  public OpensslAesCtrCryptoCodec() {
+    String loadingFailureReason = OpensslCipher.getLoadingFailureReason();
+    if (loadingFailureReason != null) {
+      throw new RuntimeException(loadingFailureReason);
+    }
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+    final Class<? extends Random> klass = conf.getClass(
+        HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY, OsSecureRandom.class, 
+        Random.class);
+    try {
+      random = ReflectionUtils.newInstance(klass, conf);
+    } catch (Exception e) {
+      LOG.info("Unable to use " + klass.getName() + ".  Falling back to " +
+          "Java SecureRandom.", e);
+      this.random = new SecureRandom();
+    }
+  }
+
+  @Override
+  protected void finalize() throws Throwable {
+    try {
+      Closeable r = (Closeable) this.random;
+      r.close();
+    } catch (ClassCastException e) {
+      // The configured random source is not Closeable; nothing to close.
+    }
+    super.finalize();
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public Encryptor createEncryptor() throws GeneralSecurityException {
+    return new OpensslAesCtrCipher(OpensslCipher.ENCRYPT_MODE);
+  }
+
+  @Override
+  public Decryptor createDecryptor() throws GeneralSecurityException {
+    return new OpensslAesCtrCipher(OpensslCipher.DECRYPT_MODE);
+  }
+  
+  @Override
+  public void generateSecureRandom(byte[] bytes) {
+    random.nextBytes(bytes);
+  }
+  
+  private static class OpensslAesCtrCipher implements Encryptor, Decryptor {
+    private final OpensslCipher cipher;
+    private final int mode;
+    private boolean contextReset = false;
+    
+    public OpensslAesCtrCipher(int mode) throws GeneralSecurityException {
+      this.mode = mode;
+      cipher = OpensslCipher.getInstance(SUITE.getName());
+    }
+
+    @Override
+    public void init(byte[] key, byte[] iv) throws IOException {
+      Preconditions.checkNotNull(key);
+      Preconditions.checkNotNull(iv);
+      contextReset = false;
+      cipher.init(mode, key, iv);
+    }
+    
+    /**
+     * AES-CTR will consume all of the input data. It requires enough space in 
+     * the destination buffer to encrypt the entire input buffer.
+     */
+    @Override
+    public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
+        throws IOException {
+      process(inBuffer, outBuffer);
+    }
+    
+    /**
+     * AES-CTR will consume all of the input data. It requires enough space in
+     * the destination buffer to decrypt the entire input buffer.
+     */
+    @Override
+    public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
+        throws IOException {
+      process(inBuffer, outBuffer);
+    }
+    
+    private void process(ByteBuffer inBuffer, ByteBuffer outBuffer)
+        throws IOException {
+      try {
+        int inputSize = inBuffer.remaining();
+        // OpensslCipher#update will maintain crypto context.
+        int n = cipher.update(inBuffer, outBuffer);
+        if (n < inputSize) {
+          /**
+           * Typically code will not get here. OpensslCipher#update will 
+           * consume all input data and put result in outBuffer. 
+           * OpensslCipher#doFinal will reset the crypto context.
+           */
+          contextReset = true;
+          cipher.doFinal(outBuffer);
+        }
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+    }
+    
+    @Override
+    public boolean isContextReset() {
+      return contextReset;
+    }
+  }
+}
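
Because the constructor above throws when the native OpenSSL cipher could not
be loaded, a caller that wants a best-effort choice between the two codecs can
consult OpensslCipher.getLoadingFailureReason() first. A minimal sketch,
assuming the CryptoCodec base class added earlier in this patch; the helper
class is illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.CryptoCodec;
    import org.apache.hadoop.crypto.JceAesCtrCryptoCodec;
    import org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec;
    import org.apache.hadoop.crypto.OpensslCipher;

    public class CodecSelectionSketch {
      /** Prefer the OpenSSL codec when the native library is available. */
      public static CryptoCodec pickCodec(Configuration conf) {
        if (OpensslCipher.getLoadingFailureReason() == null) {
          OpensslAesCtrCryptoCodec codec = new OpensslAesCtrCryptoCodec();
          codec.setConf(conf);
          return codec;
        }
        JceAesCtrCryptoCodec codec = new JceAesCtrCryptoCodec();
        codec.setConf(conf);
        return codec;
      }
    }
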
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
new file mode 100644
index 0000000..264652b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
@@ -0,0 +1,287 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.nio.ByteBuffer;
+import java.security.NoSuchAlgorithmException;
+import java.util.StringTokenizer;
+
+import javax.crypto.BadPaddingException;
+import javax.crypto.IllegalBlockSizeException;
+import javax.crypto.NoSuchPaddingException;
+import javax.crypto.ShortBufferException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.util.NativeCodeLoader;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * OpenSSL cipher using JNI.
+ * Currently only AES-CTR is supported, but the design makes it easy to add
+ * other crypto algorithms/modes.
+ */
+@InterfaceAudience.Private
+public final class OpensslCipher {
+  private static final Log LOG =
+      LogFactory.getLog(OpensslCipher.class.getName());
+  public static final int ENCRYPT_MODE = 1;
+  public static final int DECRYPT_MODE = 0;
+  
+  /** Currently only support AES/CTR/NoPadding. */
+  private static enum AlgMode {
+    AES_CTR;
+    
+    static int get(String algorithm, String mode) 
+        throws NoSuchAlgorithmException {
+      try {
+        return AlgMode.valueOf(algorithm + "_" + mode).ordinal();
+      } catch (Exception e) {
+        throw new NoSuchAlgorithmException("Doesn't support algorithm: " + 
+            algorithm + " and mode: " + mode);
+      }
+    }
+  }
+  
+  private static enum Padding {
+    NoPadding;
+    
+    static int get(String padding) throws NoSuchPaddingException {
+      try {
+        return Padding.valueOf(padding).ordinal();
+      } catch (Exception e) {
+        throw new NoSuchPaddingException("Doesn't support padding: " + padding);
+      }
+    }
+  }
+  
+  private long context = 0;
+  private final int alg;
+  private final int padding;
+  
+  private static final String loadingFailureReason;
+
+  static {
+    String loadingFailure = null;
+    try {
+      if (!NativeCodeLoader.buildSupportsOpenssl()) {
+        loadingFailure = "build does not support openssl.";
+      } else {
+        initIDs();
+      }
+    } catch (Throwable t) {
+      loadingFailure = t.getMessage();
+      LOG.debug("Failed to load OpenSSL Cipher.", t);
+    } finally {
+      loadingFailureReason = loadingFailure;
+    }
+  }
+  
+  public static String getLoadingFailureReason() {
+    return loadingFailureReason;
+  }
+  
+  private OpensslCipher(long context, int alg, int padding) {
+    this.context = context;
+    this.alg = alg;
+    this.padding = padding;
+  }
+  
+  /**
+   * Return an <code>OpensslCipher</code> object that implements the specified
+   * transformation.
+   * 
+   * @param transformation the name of the transformation, e.g., 
+   * AES/CTR/NoPadding.
+   * @return OpensslCipher an <code>OpensslCipher</code> object
+   * @throws NoSuchAlgorithmException if <code>transformation</code> is null, 
+   * empty, in an invalid format, or if Openssl doesn't implement the 
+   * specified algorithm.
+   * @throws NoSuchPaddingException if <code>transformation</code> contains 
+   * a padding scheme that is not available.
+   */
+  public static final OpensslCipher getInstance(String transformation) 
+      throws NoSuchAlgorithmException, NoSuchPaddingException {
+    Transform transform = tokenizeTransformation(transformation);
+    int algMode = AlgMode.get(transform.alg, transform.mode);
+    int padding = Padding.get(transform.padding);
+    long context = initContext(algMode, padding);
+    return new OpensslCipher(context, algMode, padding);
+  }
+  
+  /** Nested class for algorithm, mode and padding. */
+  private static class Transform {
+    final String alg;
+    final String mode;
+    final String padding;
+    
+    public Transform(String alg, String mode, String padding) {
+      this.alg = alg;
+      this.mode = mode;
+      this.padding = padding;
+    }
+  }
+  
+  private static Transform tokenizeTransformation(String transformation) 
+      throws NoSuchAlgorithmException {
+    if (transformation == null) {
+      throw new NoSuchAlgorithmException("No transformation given.");
+    }
+    
+    /*
+     * Array containing the components of a Cipher transformation:
+     * 
+     * index 0: algorithm (e.g., AES)
+     * index 1: mode (e.g., CTR)
+     * index 2: padding (e.g., NoPadding)
+     */
+    String[] parts = new String[3];
+    int count = 0;
+    StringTokenizer parser = new StringTokenizer(transformation, "/");
+    while (parser.hasMoreTokens() && count < 3) {
+      parts[count++] = parser.nextToken().trim();
+    }
+    if (count != 3 || parser.hasMoreTokens()) {
+      throw new NoSuchAlgorithmException("Invalid transformation format: " + 
+          transformation);
+    }
+    return new Transform(parts[0], parts[1], parts[2]);
+  }
+  
+  /**
+   * Initialize this cipher with a key and IV.
+   * 
+   * @param mode {@link #ENCRYPT_MODE} or {@link #DECRYPT_MODE}
+   * @param key crypto key
+   * @param iv crypto iv
+   */
+  public void init(int mode, byte[] key, byte[] iv) {
+    context = init(context, mode, alg, padding, key, iv);
+  }
+  
+  /**
+   * Continues a multiple-part encryption or decryption operation. The data
+   * is encrypted or decrypted, depending on how this cipher was initialized.
+   * <p/>
+   * 
+   * All <code>input.remaining()</code> bytes starting at 
+   * <code>input.position()</code> are processed. The result is stored in
+   * the output buffer.
+   * <p/>
+   * 
+   * Upon return, the input buffer's position will be equal to its limit;
+   * its limit will not have changed. The output buffer's position will have
+   * advanced by n, where n is the value returned by this method; the output
+   * buffer's limit will not have changed.
+   * <p/>
+   * 
+   * If <code>output.remaining()</code> bytes are insufficient to hold the
+   * result, a <code>ShortBufferException</code> is thrown.
+   * 
+   * @param input the input ByteBuffer
+   * @param output the output ByteBuffer
+   * @return int number of bytes stored in <code>output</code>
+   * @throws ShortBufferException if there is insufficient space in the
+   * output buffer
+   */
+  public int update(ByteBuffer input, ByteBuffer output) 
+      throws ShortBufferException {
+    checkState();
+    Preconditions.checkArgument(input.isDirect() && output.isDirect(), 
+        "Direct buffers are required.");
+    int len = update(context, input, input.position(), input.remaining(),
+        output, output.position(), output.remaining());
+    input.position(input.limit());
+    output.position(output.position() + len);
+    return len;
+  }
+  
+  /**
+   * Finishes a multiple-part operation. The data is encrypted or decrypted,
+   * depending on how this cipher was initialized.
+   * <p/>
+   * 
+   * The result is stored in the output buffer. Upon return, the output buffer's
+   * position will have advanced by n, where n is the value returned by this
+   * method; the output buffer's limit will not have changed.
+   * <p/>
+   * 
+   * If <code>output.remaining()</code> bytes are insufficient to hold the result,
+   * a <code>ShortBufferException</code> is thrown.
+   * <p/>
+   * 
+   * Upon finishing, this method resets this cipher object to the state it was
+   * in when previously initialized. That is, the object is available to encrypt
+   * or decrypt more data.
+   * <p/>
+   * 
+   * If any exception is thrown, this cipher object needs to be reset before it
+   * can be used again.
+   * 
+   * @param output the output ByteBuffer
+   * @return int number of bytes stored in <code>output</code>
+   * @throws ShortBufferException
+   * @throws IllegalBlockSizeException
+   * @throws BadPaddingException
+   */
+  public int doFinal(ByteBuffer output) throws ShortBufferException, 
+      IllegalBlockSizeException, BadPaddingException {
+    checkState();
+    Preconditions.checkArgument(output.isDirect(), "Direct buffer is required.");
+    int len = doFinal(context, output, output.position(), output.remaining());
+    output.position(output.position() + len);
+    return len;
+  }
+  
+  /** Forcibly clean the context. */
+  public void clean() {
+    if (context != 0) {
+      clean(context);
+      context = 0;
+    }
+  }
+
+  /** Check whether context is initialized. */
+  private void checkState() {
+    Preconditions.checkState(context != 0);
+  }
+  
+  @Override
+  protected void finalize() throws Throwable {
+    clean();
+  }
+
+  private native static void initIDs();
+  
+  private native static long initContext(int alg, int padding);
+  
+  private native long init(long context, int mode, int alg, int padding, 
+      byte[] key, byte[] iv);
+  
+  private native int update(long context, ByteBuffer input, int inputOffset, 
+      int inputLength, ByteBuffer output, int outputOffset, int maxOutputLength);
+  
+  private native int doFinal(long context, ByteBuffer output, int offset, 
+      int maxOutputLength);
+  
+  private native void clean(long context);
+  
+  public native static String getLibraryName();
+}
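
A usage sketch for the class above, assuming the native OpenSSL bindings were
built and loaded (getLoadingFailureReason() returns null); the key, IV and
class name are illustrative:

    import java.nio.ByteBuffer;

    import org.apache.hadoop.crypto.OpensslCipher;

    public class OpensslCipherSketch {
      public static void main(String[] args) throws Exception {
        OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
        cipher.init(OpensslCipher.ENCRYPT_MODE, new byte[16], new byte[16]);

        // Both buffers must be direct, per the Preconditions checks above.
        ByteBuffer input = ByteBuffer.allocateDirect(1024);
        ByteBuffer output = ByteBuffer.allocateDirect(1024);
        input.put("some bytes".getBytes("UTF-8"));
        input.flip();

        int n = cipher.update(input, output);  // encrypts all remaining input
        n += cipher.doFinal(output);           // finishes and resets the context
        output.flip();
        System.out.println(n + " cipher bytes produced");
        cipher.clean();                        // release the native context
      }
    }
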
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
new file mode 100644
index 0000000..b1fa988
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.random;
+
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.util.NativeCodeLoader;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * OpenSSL secure random using JNI.
+ * This implementation is thread-safe.
+ * <p/>
+ * 
+ * If using an Intel chipset with RDRAND, the high-performance hardware 
+ * random number generator will be used; it is much faster than
+ * {@link java.security.SecureRandom}. If RDRAND is unavailable, the default
+ * OpenSSL secure random generator will be used. It is still faster than the
+ * Java implementation and can generate strong random bytes.
+ * <p/>
+ * @see https://wiki.openssl.org/index.php/Random_Numbers
+ * @see http://en.wikipedia.org/wiki/RdRand
+ */
+@InterfaceAudience.Private
+public class OpensslSecureRandom extends Random {
+  private static final long serialVersionUID = -7828193502768789584L;
+  private static final Log LOG =
+      LogFactory.getLog(OpensslSecureRandom.class.getName());
+  
+  /** If the native SecureRandom is unavailable, fall back to Java SecureRandom. */
+  private java.security.SecureRandom fallback = null;
+  private static boolean nativeEnabled = false;
+  static {
+    if (NativeCodeLoader.isNativeCodeLoaded() &&
+        NativeCodeLoader.buildSupportsOpenssl()) {
+      try {
+        initSR();
+        nativeEnabled = true;
+      } catch (Throwable t) {
+        LOG.error("Failed to load Openssl SecureRandom", t);
+      }
+    }
+  }
+  
+  public static boolean isNativeCodeLoaded() {
+    return nativeEnabled;
+  }
+  
+  public OpensslSecureRandom() {
+    if (!nativeEnabled) {
+      fallback = new java.security.SecureRandom();
+    }
+  }
+  
+  /**
+   * Generates a user-specified number of random bytes.
+   * It's thread-safe.
+   * 
+   * @param bytes the array to be filled in with random bytes.
+   */
+  @Override
+  public void nextBytes(byte[] bytes) {
+    if (!nativeEnabled || !nextRandBytes(bytes)) {
+      fallback.nextBytes(bytes);
+    }
+  }
+  
+  @Override
+  public void setSeed(long seed) {
+    // Self-seeding.
+  }
+  
+  /**
+   * Generates an integer containing the user-specified number of
+   * random bits (right justified, with leading zeros).
+   *
+   * @param numBits number of random bits to be generated, where
+   * 0 <= <code>numBits</code> <= 32.
+   *
+   * @return int an <code>int</code> containing the user-specified number
+   * of random bits (right justified, with leading zeros).
+   */
+  @Override
+  final protected int next(int numBits) {
+    Preconditions.checkArgument(numBits >= 0 && numBits <= 32);
+    int numBytes = (numBits + 7) / 8;
+    byte b[] = new byte[numBytes];
+    int next = 0;
+    
+    nextBytes(b);
+    for (int i = 0; i < numBytes; i++) {
+      next = (next << 8) + (b[i] & 0xFF);
+    }
+    
+    return next >>> (numBytes * 8 - numBits);
+  }
+  
+  private native static void initSR();
+  private native boolean nextRandBytes(byte[] bytes); 
+}
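
The bit handling in next(int) can be traced with fixed bytes in place of
random ones; for numBits = 12, two bytes are drawn and the result is shifted
down to 12 bits (illustrative class and values):

    public class NextBitsExample {
      public static void main(String[] args) {
        int numBits = 12;
        int numBytes = (numBits + 7) / 8;           // 2 bytes are needed
        byte[] b = { (byte) 0xAB, (byte) 0xCD };    // stand-ins for random bytes
        int next = 0;
        for (int i = 0; i < numBytes; i++) {
          next = (next << 8) + (b[i] & 0xFF);       // 0xABCD after the loop
        }
        next = next >>> (numBytes * 8 - numBits);   // keep the top 12 bits
        System.out.println(Integer.toHexString(next)); // prints "abc"
      }
    }
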
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java
new file mode 100644
index 0000000..c6cb0a8
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.random;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT;
+
+/**
+ * A Random implementation that uses random bytes sourced from the
+ * operating system.
+ */
+@InterfaceAudience.Private
+public class OsSecureRandom extends Random implements Closeable, Configurable {
+  private static final long serialVersionUID = 6391500337172057900L;
+
+  private transient Configuration conf;
+
+  private final int RESERVOIR_LENGTH = 8192;
+
+  private String randomDevPath;
+
+  private transient FileInputStream stream;
+
+  private final byte[] reservoir = new byte[RESERVOIR_LENGTH];
+
+  private int pos = reservoir.length;
+
+  private void fillReservoir(int min) {
+    if (pos >= reservoir.length - min) {
+      try {
+        IOUtils.readFully(stream, reservoir, 0, reservoir.length);
+      } catch (IOException e) {
+        throw new RuntimeException("failed to fill reservoir", e);
+      }
+      pos = 0;
+    }
+  }
+
+  public OsSecureRandom() {
+  }
+  
+  @Override
+  synchronized public void setConf(Configuration conf) {
+    this.conf = conf;
+    this.randomDevPath = conf.get(
+        HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY,
+        HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT);
+    File randomDevFile = new File(randomDevPath);
+    try {
+      this.stream = new FileInputStream(randomDevFile);
+      fillReservoir(0);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  synchronized public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  synchronized public void nextBytes(byte[] bytes) {
+    int off = 0;
+    int n = 0;
+    while (off < bytes.length) {
+      fillReservoir(0);
+      n = Math.min(bytes.length - off, reservoir.length - pos);
+      System.arraycopy(reservoir, pos, bytes, off, n);
+      off += n;
+      pos += n;
+    }
+  }
+
+  @Override
+  synchronized protected int next(int nbits) {
+    fillReservoir(4);
+    int n = 0;
+    for (int i = 0; i < 4; i++) {
+      n = ((n << 8) | (reservoir[pos++] & 0xff));
+    }
+    return n & (0xffffffff >> (32 - nbits));
+  }
+
+  @Override
+  synchronized public void close() throws IOException {
+    stream.close();
+  }
+}
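
A short usage sketch for OsSecureRandom: by default it reads from /dev/urandom
and serves callers out of an 8192-byte reservoir (class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.random.OsSecureRandom;

    public class OsSecureRandomSketch {
      public static void main(String[] args) throws Exception {
        OsSecureRandom random = new OsSecureRandom();
        random.setConf(new Configuration());  // opens the configured device file
        byte[] key = new byte[16];
        random.nextBytes(key);                // refills the reservoir as needed
        random.close();                       // closes the device stream
      }
    }
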
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index b4aedb3..c1101c5f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -283,5 +283,4 @@
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY = "nfs.exports.allowed.hosts";
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
-
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 59c0814..e4ee78a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -288,6 +288,21 @@
   /** Class to override Sasl Properties for a connection */
   public static final String  HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS =
     "hadoop.security.saslproperties.resolver.class";
+  public static final String HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX = 
+    "hadoop.security.crypto.codec.classes";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY =
+    "hadoop.security.crypto.cipher.suite";
+  public static final String HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT = 
+    "AES/CTR/NoPadding";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY =
+    "hadoop.security.crypto.jce.provider";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY = 
+    "hadoop.security.crypto.buffer.size";
+  /** Default value for HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY */
+  public static final int HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT = 8192;
   /** Class to override Impersonation provider */
   public static final String  HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS =
     "hadoop.security.impersonation.provider.class";
@@ -318,5 +333,20 @@
       "hadoop.security.kms.client.encrypted.key.cache.expiry";
   /** Default value for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY (12 hrs)*/
   public static final int KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
+
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY = 
+    "hadoop.security.java.secure.random.algorithm";
+  /** Default value for HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY */
+  public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT = 
+    "SHA1PRNG";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY = 
+    "hadoop.security.secure.random.impl";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY = 
+    "hadoop.security.random.device.file.path";
+  public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT = 
+    "/dev/urandom";
 }
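
The new keys can be set programmatically as well as in core-site.xml. A
minimal sketch that spells out the defaults shown above explicitly (class name
is illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class CryptoConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("hadoop.security.crypto.cipher.suite", "AES/CTR/NoPadding");
        conf.setInt("hadoop.security.crypto.buffer.size", 8192);
        conf.set("hadoop.security.java.secure.random.algorithm", "SHA1PRNG");
        conf.set("hadoop.security.random.device.file.path", "/dev/urandom");
      }
    }
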
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
index 212fbba..3f97ea8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java
@@ -102,7 +102,7 @@
   }
 
   /**
-   * Get a reference to the wrapped output stream. Used by unit tests.
+   * Get a reference to the wrapped output stream.
    *
    * @return the underlying output stream
    */
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java
new file mode 100644
index 0000000..f960233
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.commons.codec.binary.Hex;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.crypto.CipherSuite;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * FileEncryptionInfo encapsulates all the encryption-related information for
+ * an encrypted file.
+ */
+@InterfaceAudience.Private
+public class FileEncryptionInfo {
+
+  private final CipherSuite cipherSuite;
+  private final byte[] edek;
+  private final byte[] iv;
+  private final String ezKeyVersionName;
+
+  /**
+   * Create a FileEncryptionInfo.
+   *
+   * @param suite CipherSuite used to encrypt the file
+   * @param edek encrypted data encryption key (EDEK) of the file
+   * @param iv initialization vector (IV) used to encrypt the file
+   * @param ezKeyVersionName name of the KeyVersion used to encrypt the
+   *                         encrypted data encryption key.
+   */
+  public FileEncryptionInfo(final CipherSuite suite, final byte[] edek,
+      final byte[] iv, final String ezKeyVersionName) {
+    checkNotNull(suite);
+    checkNotNull(edek);
+    checkNotNull(iv);
+    checkNotNull(ezKeyVersionName);
+    checkArgument(edek.length == suite.getAlgorithmBlockSize(),
+        "Unexpected key length");
+    checkArgument(iv.length == suite.getAlgorithmBlockSize(),
+        "Unexpected IV length");
+    this.cipherSuite = suite;
+    this.edek = edek;
+    this.iv = iv;
+    this.ezKeyVersionName = ezKeyVersionName;
+  }
+
+  /**
+   * @return {@link org.apache.hadoop.crypto.CipherSuite} used to encrypt
+   * the file.
+   */
+  public CipherSuite getCipherSuite() {
+    return cipherSuite;
+  }
+
+  /**
+   * @return encrypted data encryption key (EDEK) for the file
+   */
+  public byte[] getEncryptedDataEncryptionKey() {
+    return edek;
+  }
+
+  /**
+   * @return initialization vector (IV) for the cipher used to encrypt the file
+   */
+  public byte[] getIV() {
+    return iv;
+  }
+
+  /**
+   * @return name of the encryption zone KeyVersion used to encrypt the
+   * encrypted data encryption key (EDEK).
+   */
+  public String getEzKeyVersionName() { return ezKeyVersionName; }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder("{");
+    builder.append("cipherSuite: " + cipherSuite);
+    builder.append(", edek: " + Hex.encodeHexString(edek));
+    builder.append(", iv: " + Hex.encodeHexString(iv));
+    builder.append(", ezKeyVersionName: " + ezKeyVersionName);
+    builder.append("}");
+    return builder.toString();
+  }
+}
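
A construction sketch for FileEncryptionInfo. The AES_CTR_NOPADDING constant
is assumed from the CipherSuite class added earlier in this patch, and the
EDEK, IV and key-version name are demo values only; both arrays must match the
suite's 16-byte block size per the checkArgument calls above:

    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.fs.FileEncryptionInfo;

    public class FileEncryptionInfoSketch {
      public static void main(String[] args) {
        byte[] edek = new byte[16];   // encrypted data encryption key (demo)
        byte[] iv = new byte[16];     // initialization vector (demo)
        FileEncryptionInfo info = new FileEncryptionInfo(
            CipherSuite.AES_CTR_NOPADDING, edek, iv, "myEZKey@0");
        System.out.println(info);     // suite, hex-encoded EDEK/IV, key version
      }
    }
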
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataInputStream.java
new file mode 100644
index 0000000..8758d28
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataInputStream.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.crypto;
+
+import java.io.IOException;
+
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoInputStream;
+import org.apache.hadoop.fs.FSDataInputStream;
+
+public class CryptoFSDataInputStream extends FSDataInputStream {
+  
+  public CryptoFSDataInputStream(FSDataInputStream in, CryptoCodec codec, 
+      int bufferSize, byte[] key, byte[] iv) throws IOException {
+    super(new CryptoInputStream(in, codec, bufferSize, key, iv)); 
+  }
+  
+  public CryptoFSDataInputStream(FSDataInputStream in, CryptoCodec codec, 
+      byte[] key, byte[] iv) throws IOException {
+    super(new CryptoInputStream(in, codec, key, iv)); 
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataOutputStream.java
new file mode 100644
index 0000000..040fbcb
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataOutputStream.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.crypto;
+
+import java.io.IOException;
+
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoOutputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+
+public class CryptoFSDataOutputStream extends FSDataOutputStream {
+  private final FSDataOutputStream fsOut;
+  
+  public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec,
+      int bufferSize, byte[] key, byte[] iv) throws IOException {
+    super(new CryptoOutputStream(out, codec, bufferSize, key, iv, 
+        out.getPos()), null, out.getPos()); 
+    this.fsOut = out;
+  }
+  
+  public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec,
+      byte[] key, byte[] iv) throws IOException {
+    super(new CryptoOutputStream(out, codec, key, iv, out.getPos()), 
+        null, out.getPos()); 
+    this.fsOut = out;
+  }
+  
+  @Override
+  public long getPos() {
+    return fsOut.getPos();
+  }
+}
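
The two wrappers above make encryption transparent at the FileSystem stream
level. An end-to-end sketch over the local file system; the path, key and IV
are demo values and key management is out of scope here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.JceAesCtrCryptoCodec;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.crypto.CryptoFSDataInputStream;
    import org.apache.hadoop.fs.crypto.CryptoFSDataOutputStream;

    public class CryptoFsStreamSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        Path file = new Path("/tmp/crypto-demo.bin");
        byte[] key = new byte[16];
        byte[] iv = new byte[16];

        JceAesCtrCryptoCodec codec = new JceAesCtrCryptoCodec();
        codec.setConf(conf);

        FSDataOutputStream rawOut = fs.create(file);
        CryptoFSDataOutputStream out =
            new CryptoFSDataOutputStream(rawOut, codec, key, iv);
        out.write("secret".getBytes("UTF-8"));
        out.close();                           // flush encrypts buffered data

        FSDataInputStream rawIn = fs.open(file);
        CryptoFSDataInputStream in =
            new CryptoFSDataInputStream(rawIn, codec, key, iv);
        byte[] buf = new byte[6];
        in.readFully(buf);                     // decrypted transparently
        in.close();
        System.out.println(new String(buf, "UTF-8"));
      }
    }
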
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
index ac3b1e6..da67f1c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
@@ -58,6 +58,17 @@
   private boolean writeChecksum = true;
   
   /**
+   * The name of the raw xattr namespace. It would be nice to use
+   * XAttr.RAW.name() but we can't reference the hadoop-hdfs project.
+   */
+  private static final String RAW = "raw.";
+
+  /**
+   * The name of the reserved raw directory.
+   */
+  private static final String RESERVED_RAW = "/.reserved/raw";
+
+  /**
    * 
    * This method is used to enable the force(-f)  option while copying the files.
    * 
@@ -231,7 +242,7 @@
   /**
    * Called with a source and target destination pair
    * @param src for the operation
-   * @param target for the operation
+   * @param dst for the operation
    * @throws IOException if anything goes wrong
    */
   protected void processPath(PathData src, PathData dst) throws IOException {
@@ -253,6 +264,8 @@
       // modify dst as we descend to append the basename of the
       // current directory being processed
       dst = getTargetPath(src);
+      final boolean preserveRawXattrs =
+          checkPathsForReservedRaw(src.path, dst.path);
       if (dst.exists) {
         if (!dst.stat.isDirectory()) {
           throw new PathIsNotDirectoryException(dst.toString());
@@ -268,7 +281,7 @@
       }      
       super.recursePath(src);
       if (dst.stat.isDirectory()) {
-        preserveAttributes(src, dst);
+        preserveAttributes(src, dst, preserveRawXattrs);
       }
     } finally {
       dst = savedDst;
@@ -295,19 +308,61 @@
    * @param target where to copy the item
    * @throws IOException if copy fails
    */ 
-  protected void copyFileToTarget(PathData src, PathData target) throws IOException {
+  protected void copyFileToTarget(PathData src, PathData target)
+      throws IOException {
+    final boolean preserveRawXattrs =
+        checkPathsForReservedRaw(src.path, target.path);
     src.fs.setVerifyChecksum(verifyChecksum);
     InputStream in = null;
     try {
       in = src.fs.open(src.path);
       copyStreamToTarget(in, target);
-      preserveAttributes(src, target);
+      preserveAttributes(src, target, preserveRawXattrs);
     } finally {
       IOUtils.closeStream(in);
     }
   }
   
   /**
+   * Check the source and target paths to ensure that they are either both in
+   * /.reserved/raw or neither in /.reserved/raw. If neither src nor target is
+   * in /.reserved/raw, then return false, indicating not to preserve raw.*
+   * xattrs. If both src/target are in /.reserved/raw, then return true,
+   * indicating raw.* xattrs should be preserved. If only one of src/target is
+   * in /.reserved/raw then throw an exception.
+   *
+   * @param src The source path to check. This should be a fully-qualified
+   *            path, not relative.
+   * @param target The target path to check. This should be a fully-qualified
+   *               path, not relative.
+   * @return true if raw.* xattrs should be preserved.
+   * @throws PathOperationException if only one of src/target is in
+   * /.reserved/raw.
+   */
+  private boolean checkPathsForReservedRaw(Path src, Path target)
+      throws PathOperationException {
+    final boolean srcIsRR = Path.getPathWithoutSchemeAndAuthority(src).
+        toString().startsWith(RESERVED_RAW);
+    final boolean dstIsRR = Path.getPathWithoutSchemeAndAuthority(target).
+        toString().startsWith(RESERVED_RAW);
+    boolean preserveRawXattrs = false;
+    if (srcIsRR && !dstIsRR) {
+      final String s = "' copy from '" + RESERVED_RAW + "' to non '" +
+          RESERVED_RAW + "'. Either both source and target must be in '" +
+          RESERVED_RAW + "' or neither.";
+      throw new PathOperationException("'" + src.toString() + s);
+    } else if (!srcIsRR && dstIsRR) {
+      final String s = "' copy from non '" + RESERVED_RAW +"' to '" +
+          RESERVED_RAW + "'. Either both source and target must be in '" +
+          RESERVED_RAW + "' or neither.";
+      throw new PathOperationException("'" + dst.toString() + s);
+    } else if (srcIsRR && dstIsRR) {
+      preserveRawXattrs = true;
+    }
+    return preserveRawXattrs;
+  }
+
+  /**
    * Copies the stream contents to a temporary file.  If the copy is
    * successful, the temporary file will be renamed to the real path,
    * else the temporary file will be deleted.
@@ -337,9 +392,11 @@
    * attribute to preserve.
    * @param src source to preserve
    * @param target where to preserve attributes
+   * @param preserveRawXAttrs true if raw.* xattrs should be preserved
    * @throws IOException if fails to preserve attributes
    */
-  protected void preserveAttributes(PathData src, PathData target)
+  protected void preserveAttributes(PathData src, PathData target,
+      boolean preserveRawXAttrs)
       throws IOException {
     if (shouldPreserve(FileAttribute.TIMESTAMPS)) {
       target.fs.setTimes(
@@ -369,13 +426,17 @@
         target.fs.setAcl(target.path, srcFullEntries);
       }
     }
-    if (shouldPreserve(FileAttribute.XATTR)) {
+    final boolean preserveXAttrs = shouldPreserve(FileAttribute.XATTR);
+    if (preserveXAttrs || preserveRawXAttrs) {
       Map<String, byte[]> srcXAttrs = src.fs.getXAttrs(src.path);
       if (srcXAttrs != null) {
         Iterator<Entry<String, byte[]>> iter = srcXAttrs.entrySet().iterator();
         while (iter.hasNext()) {
           Entry<String, byte[]> entry = iter.next();
-          target.fs.setXAttr(target.path, entry.getKey(), entry.getValue());
+          final String xattrName = entry.getKey();
+          if (xattrName.startsWith(RAW) || preserveXAttrs) {
+            target.fs.setXAttr(target.path, entry.getKey(), entry.getValue());
+          }
         }
       }
     }
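
For context, the rule implemented above can also be exercised directly against the
FileSystem API. The following is a minimal sketch, not code from this patch: it
assumes an HDFS client Configuration is on the classpath and uses hypothetical
paths, and it simply re-applies any raw.* xattrs read from a source under
/.reserved/raw to a target that is also under /.reserved/raw, which is what
preserveAttributes does for such copies.

    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RawXattrCopySketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Both paths are under /.reserved/raw, so raw.* xattrs are visible to
        // the client; the concrete paths are hypothetical.
        Path rawSrc = new Path("/.reserved/raw/user/alice/file1");
        Path rawDst = new Path("/.reserved/raw/user/alice/file2");
        Map<String, byte[]> xattrs = fs.getXAttrs(rawSrc);
        for (Map.Entry<String, byte[]> e : xattrs.entrySet()) {
          if (e.getKey().startsWith("raw.")) {
            // Re-apply each raw.* xattr on the target, as a copy between two
            // /.reserved/raw paths would.
            fs.setXAttr(rawDst, e.getKey(), e.getValue());
          }
        }
      }
    }
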
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
index 4dd2f4a..3fd870c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
@@ -143,7 +143,11 @@
       "timestamps, ownership, permission. If -pa is specified, " +
       "then preserves permission also because ACL is a super-set of " +
       "permission. Passing -f overwrites the destination if it " +
-      "already exists.\n";
+      "already exists. raw namespace extended attributes are preserved " +
+      "if (1) they are supported (HDFS only) and, (2) all of the source and " +
+      "target pathnames are in the /.reserved/raw hierarchy. raw namespace " +
+      "xattr preservation is determined solely by the presence (or absence) " +
+      "of the /.reserved/raw prefix and not by the -p option.\n";
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
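
The behaviour described in the help text can also be driven programmatically. A
minimal sketch with hypothetical paths; the raw.* xattrs are copied here only
because both pathnames are under /.reserved/raw, not because of any -p flag:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class ReservedRawCopySketch {
      public static void main(String[] args) throws Exception {
        // Equivalent to: hadoop fs -cp /.reserved/raw/src/f /.reserved/raw/dst/f
        int rc = ToolRunner.run(new Configuration(), new FsShell(),
            new String[] { "-cp", "/.reserved/raw/src/f", "/.reserved/raw/dst/f" });
        System.exit(rc);
      }
    }
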
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
index 5667d98..533fc07 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
@@ -78,6 +78,11 @@
    * Returns true only if this build was compiled with support for snappy.
    */
   public static native boolean buildSupportsSnappy();
+  
+  /**
+   * Returns true only if this build was compiled with support for openssl.
+   */
+  public static native boolean buildSupportsOpenssl();
 
   public static native String getLibraryName();
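
A minimal sketch of how a caller might consult the new probe; it assumes
libhadoop is on java.library.path so that the native methods can be bound at all:

    import org.apache.hadoop.util.NativeCodeLoader;

    public class OpensslSupportCheck {
      public static void main(String[] args) {
        // buildSupportsOpenssl() is itself a native call, so the library must
        // load successfully before it can be invoked.
        boolean supported = NativeCodeLoader.isNativeCodeLoaded()
            && NativeCodeLoader.buildSupportsOpenssl();
        System.out.println("libhadoop built with OpenSSL support: " + supported);
      }
    }
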
 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
index 84117e2..0d87bce 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeLibraryChecker.java
@@ -20,6 +20,7 @@
 
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.OpensslCipher;
 import org.apache.hadoop.io.compress.Lz4Codec;
 import org.apache.hadoop.io.compress.SnappyCodec;
 import org.apache.hadoop.io.compress.bzip2.Bzip2Factory;
@@ -60,6 +61,8 @@
     // lz4 is linked within libhadoop
     boolean lz4Loaded = nativeHadoopLoaded;
     boolean bzip2Loaded = Bzip2Factory.isNativeBzip2Loaded(conf);
+    boolean openSslLoaded = false;
+    String openSslDetail = "";
     String hadoopLibraryName = "";
     String zlibLibraryName = "";
     String snappyLibraryName = "";
@@ -76,6 +79,13 @@
       if (snappyLoaded && NativeCodeLoader.buildSupportsSnappy()) {
         snappyLibraryName = SnappyCodec.getLibraryName();
       }
+      if (OpensslCipher.getLoadingFailureReason() != null) {
+        openSslDetail = OpensslCipher.getLoadingFailureReason();
+        openSslLoaded = false;
+      } else {
+        openSslDetail = OpensslCipher.getLibraryName();
+        openSslLoaded = true;
+      }
       if (lz4Loaded) {
         lz4LibraryName = Lz4Codec.getLibraryName();
       }
@@ -84,11 +94,12 @@
       }
     }
     System.out.println("Native library checking:");
-    System.out.printf("hadoop: %b %s\n", nativeHadoopLoaded, hadoopLibraryName);
-    System.out.printf("zlib:   %b %s\n", zlibLoaded, zlibLibraryName);
-    System.out.printf("snappy: %b %s\n", snappyLoaded, snappyLibraryName);
-    System.out.printf("lz4:    %b %s\n", lz4Loaded, lz4LibraryName);
-    System.out.printf("bzip2:  %b %s\n", bzip2Loaded, bzip2LibraryName);
+    System.out.printf("hadoop:  %b %s\n", nativeHadoopLoaded, hadoopLibraryName);
+    System.out.printf("zlib:    %b %s\n", zlibLoaded, zlibLibraryName);
+    System.out.printf("snappy:  %b %s\n", snappyLoaded, snappyLibraryName);
+    System.out.printf("lz4:     %b %s\n", lz4Loaded, lz4LibraryName);
+    System.out.printf("bzip2:   %b %s\n", bzip2Loaded, bzip2LibraryName);
+    System.out.printf("openssl: %b %s\n", openSslLoaded, openSslDetail);
     if ((!nativeHadoopLoaded) ||
         (checkAll && !(zlibLoaded && snappyLoaded && lz4Loaded && bzip2Loaded))) {
       // return 1 to indicate check failed
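
The openssl line printed above comes from the probe sketched below; this is
illustrative code rather than part of the patch, and it relies only on the two
OpensslCipher static methods already used in the checker:

    import org.apache.hadoop.crypto.OpensslCipher;

    public class OpensslProbeSketch {
      public static void main(String[] args) {
        // A null failure reason means the native cipher bindings resolved.
        String reason = OpensslCipher.getLoadingFailureReason();
        if (reason == null) {
          System.out.println("openssl: true " + OpensslCipher.getLibraryName());
        } else {
          System.out.println("openssl: false " + reason);
        }
      }
    }
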
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
new file mode 100644
index 0000000..5cb5bba
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/OpensslCipher.c
@@ -0,0 +1,382 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+#include "org_apache_hadoop_crypto.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+ 
+#include "org_apache_hadoop_crypto_OpensslCipher.h"
+
+#ifdef UNIX
+static EVP_CIPHER_CTX * (*dlsym_EVP_CIPHER_CTX_new)(void);
+static void (*dlsym_EVP_CIPHER_CTX_free)(EVP_CIPHER_CTX *);
+static int (*dlsym_EVP_CIPHER_CTX_cleanup)(EVP_CIPHER_CTX *);
+static void (*dlsym_EVP_CIPHER_CTX_init)(EVP_CIPHER_CTX *);
+static int (*dlsym_EVP_CIPHER_CTX_set_padding)(EVP_CIPHER_CTX *, int);
+static int (*dlsym_EVP_CipherInit_ex)(EVP_CIPHER_CTX *, const EVP_CIPHER *,  \
+           ENGINE *, const unsigned char *, const unsigned char *, int);
+static int (*dlsym_EVP_CipherUpdate)(EVP_CIPHER_CTX *, unsigned char *,  \
+           int *, const unsigned char *, int);
+static int (*dlsym_EVP_CipherFinal_ex)(EVP_CIPHER_CTX *, unsigned char *, int *);
+static EVP_CIPHER * (*dlsym_EVP_aes_256_ctr)(void);
+static EVP_CIPHER * (*dlsym_EVP_aes_128_ctr)(void);
+static void *openssl;
+#endif
+
+#ifdef WINDOWS
+typedef EVP_CIPHER_CTX * (__cdecl *__dlsym_EVP_CIPHER_CTX_new)(void);
+typedef void (__cdecl *__dlsym_EVP_CIPHER_CTX_free)(EVP_CIPHER_CTX *);
+typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_cleanup)(EVP_CIPHER_CTX *);
+typedef void (__cdecl *__dlsym_EVP_CIPHER_CTX_init)(EVP_CIPHER_CTX *);
+typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_set_padding)(EVP_CIPHER_CTX *, int);
+typedef int (__cdecl *__dlsym_EVP_CipherInit_ex)(EVP_CIPHER_CTX *,  \
+             const EVP_CIPHER *, ENGINE *, const unsigned char *,  \
+             const unsigned char *, int);
+typedef int (__cdecl *__dlsym_EVP_CipherUpdate)(EVP_CIPHER_CTX *,  \
+             unsigned char *, int *, const unsigned char *, int);
+typedef int (__cdecl *__dlsym_EVP_CipherFinal_ex)(EVP_CIPHER_CTX *,  \
+             unsigned char *, int *);
+typedef EVP_CIPHER * (__cdecl *__dlsym_EVP_aes_256_ctr)(void);
+typedef EVP_CIPHER * (__cdecl *__dlsym_EVP_aes_128_ctr)(void);
+static __dlsym_EVP_CIPHER_CTX_new dlsym_EVP_CIPHER_CTX_new;
+static __dlsym_EVP_CIPHER_CTX_free dlsym_EVP_CIPHER_CTX_free;
+static __dlsym_EVP_CIPHER_CTX_cleanup dlsym_EVP_CIPHER_CTX_cleanup;
+static __dlsym_EVP_CIPHER_CTX_init dlsym_EVP_CIPHER_CTX_init;
+static __dlsym_EVP_CIPHER_CTX_set_padding dlsym_EVP_CIPHER_CTX_set_padding;
+static __dlsym_EVP_CipherInit_ex dlsym_EVP_CipherInit_ex;
+static __dlsym_EVP_CipherUpdate dlsym_EVP_CipherUpdate;
+static __dlsym_EVP_CipherFinal_ex dlsym_EVP_CipherFinal_ex;
+static __dlsym_EVP_aes_256_ctr dlsym_EVP_aes_256_ctr;
+static __dlsym_EVP_aes_128_ctr dlsym_EVP_aes_128_ctr;
+static HMODULE openssl;
+#endif
+
+static void loadAesCtr(JNIEnv *env)
+{
+#ifdef UNIX
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_aes_256_ctr, env, openssl, "EVP_aes_256_ctr");
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_aes_128_ctr, env, openssl, "EVP_aes_128_ctr");
+#endif
+
+#ifdef WINDOWS
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_aes_256_ctr, dlsym_EVP_aes_256_ctr,  \
+                      env, openssl, "EVP_aes_256_ctr");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_aes_128_ctr, dlsym_EVP_aes_128_ctr,  \
+                      env, openssl, "EVP_aes_128_ctr");
+#endif
+}
+
+JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initIDs
+    (JNIEnv *env, jclass clazz)
+{
+  char msg[1000];
+#ifdef UNIX
+  openssl = dlopen(HADOOP_OPENSSL_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
+#endif
+
+#ifdef WINDOWS
+  openssl = LoadLibrary(HADOOP_OPENSSL_LIBRARY);
+#endif
+
+  if (!openssl) {
+    snprintf(msg, sizeof(msg), "Cannot load %s (%s)!", HADOOP_OPENSSL_LIBRARY,  \
+        dlerror());
+    THROW(env, "java/lang/UnsatisfiedLinkError", msg);
+    return;
+  }
+
+#ifdef UNIX
+  dlerror();  // Clear any existing error
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_new, env, openssl,  \
+                      "EVP_CIPHER_CTX_new");
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_free, env, openssl,  \
+                      "EVP_CIPHER_CTX_free");
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_cleanup, env, openssl,  \
+                      "EVP_CIPHER_CTX_cleanup");
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_init, env, openssl,  \
+                      "EVP_CIPHER_CTX_init");
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_set_padding, env, openssl,  \
+                      "EVP_CIPHER_CTX_set_padding");
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CipherInit_ex, env, openssl,  \
+                      "EVP_CipherInit_ex");
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CipherUpdate, env, openssl,  \
+                      "EVP_CipherUpdate");
+  LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CipherFinal_ex, env, openssl,  \
+                      "EVP_CipherFinal_ex");
+#endif
+
+#ifdef WINDOWS
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_new, dlsym_EVP_CIPHER_CTX_new,  \
+                      env, openssl, "EVP_CIPHER_CTX_new");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_free, dlsym_EVP_CIPHER_CTX_free,  \
+                      env, openssl, "EVP_CIPHER_CTX_free");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_cleanup,  \
+                      dlsym_EVP_CIPHER_CTX_cleanup, env, 
+                      openssl, "EVP_CIPHER_CTX_cleanup");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_init, dlsym_EVP_CIPHER_CTX_init,  \
+                      env, openssl, "EVP_CIPHER_CTX_init");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_set_padding,  \
+                      dlsym_EVP_CIPHER_CTX_set_padding, env,  \
+                      openssl, "EVP_CIPHER_CTX_set_padding");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CipherInit_ex, dlsym_EVP_CipherInit_ex,  \
+                      env, openssl, "EVP_CipherInit_ex");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CipherUpdate, dlsym_EVP_CipherUpdate,  \
+                      env, openssl, "EVP_CipherUpdate");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CipherFinal_ex, dlsym_EVP_CipherFinal_ex,  \
+                      env, openssl, "EVP_CipherFinal_ex");
+#endif
+
+  loadAesCtr(env);
+  jthrowable jthr = (*env)->ExceptionOccurred(env);
+  if (jthr) {
+    (*env)->DeleteLocalRef(env, jthr);
+    THROW(env, "java/lang/UnsatisfiedLinkError",  \
+        "Cannot find AES-CTR support, is your version of Openssl new enough?");
+    return;
+  }
+}
+
+JNIEXPORT jlong JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initContext
+    (JNIEnv *env, jclass clazz, jint alg, jint padding)
+{
+  if (alg != AES_CTR) {
+    THROW(env, "java/security/NoSuchAlgorithmException", NULL);
+    return (jlong)0;
+  }
+  if (padding != NOPADDING) {
+    THROW(env, "javax/crypto/NoSuchPaddingException", NULL);
+    return (jlong)0;
+  }
+  
+  if (dlsym_EVP_aes_256_ctr == NULL || dlsym_EVP_aes_128_ctr == NULL) {
+    THROW(env, "java/security/NoSuchAlgorithmException",  \
+        "Doesn't support AES CTR.");
+    return (jlong)0;
+  }
+  
+  // Create and initialize a EVP_CIPHER_CTX
+  EVP_CIPHER_CTX *context = dlsym_EVP_CIPHER_CTX_new();
+  if (!context) {
+    THROW(env, "java/lang/OutOfMemoryError", NULL);
+    return (jlong)0;
+  }
+   
+  return JLONG(context);
+}
+
+// Only supports AES-CTR currently
+static EVP_CIPHER * getEvpCipher(int alg, int keyLen)
+{
+  EVP_CIPHER *cipher = NULL;
+  if (alg == AES_CTR) {
+    if (keyLen == KEY_LENGTH_256) {
+      cipher = dlsym_EVP_aes_256_ctr();
+    } else if (keyLen == KEY_LENGTH_128) {
+      cipher = dlsym_EVP_aes_128_ctr();
+    }
+  }
+  return cipher;
+}
+
+JNIEXPORT jlong JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_init
+    (JNIEnv *env, jobject object, jlong ctx, jint mode, jint alg, jint padding, 
+    jbyteArray key, jbyteArray iv)
+{
+  int jKeyLen = (*env)->GetArrayLength(env, key);
+  int jIvLen = (*env)->GetArrayLength(env, iv);
+  if (jKeyLen != KEY_LENGTH_128 && jKeyLen != KEY_LENGTH_256) {
+    THROW(env, "java/lang/IllegalArgumentException", "Invalid key length.");
+    return (jlong)0;
+  }
+  if (jIvLen != IV_LENGTH) {
+    THROW(env, "java/lang/IllegalArgumentException", "Invalid iv length.");
+    return (jlong)0;
+  }
+  
+  EVP_CIPHER_CTX *context = CONTEXT(ctx);
+  if (context == 0) {
+    // Create and initialize a EVP_CIPHER_CTX
+    context = dlsym_EVP_CIPHER_CTX_new();
+    if (!context) {
+      THROW(env, "java/lang/OutOfMemoryError", NULL);
+      return (jlong)0;
+    }
+  }
+  
+  jbyte *jKey = (*env)->GetByteArrayElements(env, key, NULL);
+  if (jKey == NULL) {
+    THROW(env, "java/lang/InternalError", "Cannot get bytes array for key.");
+    return (jlong)0;
+  }
+  jbyte *jIv = (*env)->GetByteArrayElements(env, iv, NULL);
+  if (jIv == NULL) {
+    (*env)->ReleaseByteArrayElements(env, key, jKey, 0);
+    THROW(env, "java/lang/InternalError", "Cannot get bytes array for iv.");
+    return (jlong)0;
+  }
+  
+  int rc = dlsym_EVP_CipherInit_ex(context, getEvpCipher(alg, jKeyLen),  \
+      NULL, (unsigned char *)jKey, (unsigned char *)jIv, mode == ENCRYPT_MODE);
+  (*env)->ReleaseByteArrayElements(env, key, jKey, 0);
+  (*env)->ReleaseByteArrayElements(env, iv, jIv, 0);
+  if (rc == 0) {
+    dlsym_EVP_CIPHER_CTX_cleanup(context);
+    THROW(env, "java/lang/InternalError", "Error in EVP_CipherInit_ex.");
+    return (jlong)0;
+  }
+  
+  if (padding == NOPADDING) {
+    dlsym_EVP_CIPHER_CTX_set_padding(context, 0);
+  }
+  
+  return JLONG(context);
+}
+
+// https://www.openssl.org/docs/crypto/EVP_EncryptInit.html
+static int check_update_max_output_len(EVP_CIPHER_CTX *context, int input_len, 
+    int max_output_len)
+{
+  if (context->flags & EVP_CIPH_NO_PADDING) {
+    if (max_output_len >= input_len) {
+      return 1;
+    }
+    return 0;
+  } else {
+    int b = context->cipher->block_size;
+    if (context->encrypt) {
+      if (max_output_len >= input_len + b - 1) {
+        return 1;
+      }
+    } else {
+      if (max_output_len >= input_len + b) {
+        return 1;
+      }
+    }
+    
+    return 0;
+  }
+}
+
+JNIEXPORT jint JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_update
+    (JNIEnv *env, jobject object, jlong ctx, jobject input, jint input_offset, 
+    jint input_len, jobject output, jint output_offset, jint max_output_len)
+{
+  EVP_CIPHER_CTX *context = CONTEXT(ctx);
+  if (!check_update_max_output_len(context, input_len, max_output_len)) {
+    THROW(env, "javax/crypto/ShortBufferException",  \
+        "Output buffer is not sufficient.");
+    return 0;
+  }
+  unsigned char *input_bytes = (*env)->GetDirectBufferAddress(env, input);
+  unsigned char *output_bytes = (*env)->GetDirectBufferAddress(env, output);
+  if (input_bytes == NULL || output_bytes == NULL) {
+    THROW(env, "java/lang/InternalError", "Cannot get buffer address.");
+    return 0;
+  }
+  input_bytes = input_bytes + input_offset;
+  output_bytes = output_bytes + output_offset;
+  
+  int output_len = 0;
+  if (!dlsym_EVP_CipherUpdate(context, output_bytes, &output_len,  \
+      input_bytes, input_len)) {
+    dlsym_EVP_CIPHER_CTX_cleanup(context);
+    THROW(env, "java/lang/InternalError", "Error in EVP_CipherUpdate.");
+    return 0;
+  }
+  return output_len;
+}
+
+// https://www.openssl.org/docs/crypto/EVP_EncryptInit.html
+static int check_doFinal_max_output_len(EVP_CIPHER_CTX *context, 
+    int max_output_len)
+{
+  if (context->flags & EVP_CIPH_NO_PADDING) {
+    return 1;
+  } else {
+    int b = context->cipher->block_size;
+    if (max_output_len >= b) {
+      return 1;
+    }
+    
+    return 0;
+  }
+}
+
+JNIEXPORT jint JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_doFinal
+    (JNIEnv *env, jobject object, jlong ctx, jobject output, jint offset, 
+    jint max_output_len)
+{
+  EVP_CIPHER_CTX *context = CONTEXT(ctx);
+  if (!check_doFinal_max_output_len(context, max_output_len)) {
+    THROW(env, "javax/crypto/ShortBufferException",  \
+        "Output buffer is not sufficient.");
+    return 0;
+  }
+  unsigned char *output_bytes = (*env)->GetDirectBufferAddress(env, output);
+  if (output_bytes == NULL) {
+    THROW(env, "java/lang/InternalError", "Cannot get buffer address.");
+    return 0;
+  }
+  output_bytes = output_bytes + offset;
+  
+  int output_len = 0;
+  if (!dlsym_EVP_CipherFinal_ex(context, output_bytes, &output_len)) {
+    dlsym_EVP_CIPHER_CTX_cleanup(context);
+    THROW(env, "java/lang/InternalError", "Error in EVP_CipherFinal_ex.");
+    return 0;
+  }
+  return output_len;
+}
+
+JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_clean
+    (JNIEnv *env, jobject object, jlong ctx) 
+{
+  EVP_CIPHER_CTX *context = CONTEXT(ctx);
+  if (context) {
+    dlsym_EVP_CIPHER_CTX_free(context);
+  }
+}
+
+JNIEXPORT jstring JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_getLibraryName
+    (JNIEnv *env, jclass clazz) 
+{
+#ifdef UNIX
+  if (dlsym_EVP_CIPHER_CTX_init) {
+    Dl_info dl_info;
+    if(dladdr(
+        dlsym_EVP_CIPHER_CTX_init,
+        &dl_info)) {
+      return (*env)->NewStringUTF(env, dl_info.dli_fname);
+    }
+  }
+
+  return (*env)->NewStringUTF(env, HADOOP_OPENSSL_LIBRARY);
+#endif
+
+#ifdef WINDOWS
+  LPWSTR filename = NULL;
+  GetLibraryName(dlsym_EVP_CIPHER_CTX_init, &filename);
+  if (filename != NULL) {
+    return (*env)->NewString(env, filename, (jsize) wcslen(filename));
+  } else {
+    return (*env)->NewStringUTF(env, "Unavailable");
+  }
+#endif
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/org_apache_hadoop_crypto.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/org_apache_hadoop_crypto.h
new file mode 100644
index 0000000..0afab02
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/org_apache_hadoop_crypto.h
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+#ifndef ORG_APACHE_HADOOP_CRYPTO_H
+#define ORG_APACHE_HADOOP_CRYPTO_H
+
+#include "org_apache_hadoop.h"
+
+#ifdef UNIX
+#include <dlfcn.h>
+#include "config.h"
+#endif
+
+#ifdef WINDOWS
+#include "winutils.h"
+#endif
+
+#include <openssl/aes.h>
+#include <openssl/evp.h>
+#include <openssl/err.h>
+
+/**
+ * A helper macro to convert the java 'context-handle' 
+ * to a EVP_CIPHER_CTX pointer. 
+ */
+#define CONTEXT(context) ((EVP_CIPHER_CTX*)((ptrdiff_t)(context)))
+
+/**
+ * A helper macro to convert the EVP_CIPHER_CTX pointer to the 
+ * java 'context-handle'.
+ */
+#define JLONG(context) ((jlong)((ptrdiff_t)(context)))
+
+#define KEY_LENGTH_128 16
+#define KEY_LENGTH_256 32
+#define IV_LENGTH 16
+
+#define ENCRYPT_MODE 1
+#define DECRYPT_MODE 0
+
+/** Currently only support AES/CTR/NoPadding. */
+#define AES_CTR 0
+#define NOPADDING 0
+#define PKCSPADDING 1
+
+#endif //ORG_APACHE_HADOOP_CRYPTO_H
\ No newline at end of file
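
The constants above describe AES in CTR mode with no padding, a 16-byte IV, and
128- or 256-bit keys. For reference, a minimal JCE sketch of the same cipher
setup that the native EVP_CipherInit_ex/EVP_CipherUpdate path performs; the
all-zero key and IV are illustrative only:

    import javax.crypto.Cipher;
    import javax.crypto.spec.IvParameterSpec;
    import javax.crypto.spec.SecretKeySpec;

    public class AesCtrJceSketch {
      public static void main(String[] args) throws Exception {
        byte[] key = new byte[16];  // KEY_LENGTH_128 bytes, zeroed for illustration
        byte[] iv = new byte[16];   // IV_LENGTH bytes
        Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
        cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"),
            new IvParameterSpec(iv));
        byte[] ciphertext = cipher.update("hello".getBytes("UTF-8"));
        // CTR is a stream mode, so the output length equals the input length.
        System.out.println(ciphertext.length);
      }
    }
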
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
new file mode 100644
index 0000000..6c31d10
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/OpensslSecureRandom.c
@@ -0,0 +1,335 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "org_apache_hadoop_crypto_random.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef UNIX
+#include <pthread.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#endif
+
+#ifdef WINDOWS
+#include <windows.h>
+#endif
+ 
+#include "org_apache_hadoop_crypto_random_OpensslSecureRandom.h"
+
+#ifdef UNIX
+static void * (*dlsym_CRYPTO_malloc) (int, const char *, int);
+static void (*dlsym_CRYPTO_free) (void *);
+static int (*dlsym_CRYPTO_num_locks) (void);
+static void (*dlsym_CRYPTO_set_locking_callback) (void (*)());
+static void (*dlsym_CRYPTO_set_id_callback) (unsigned long (*)());
+static void (*dlsym_ENGINE_load_rdrand) (void);
+static ENGINE * (*dlsym_ENGINE_by_id) (const char *);
+static int (*dlsym_ENGINE_init) (ENGINE *);
+static int (*dlsym_ENGINE_set_default) (ENGINE *, unsigned int);
+static int (*dlsym_ENGINE_finish) (ENGINE *);
+static int (*dlsym_ENGINE_free) (ENGINE *);
+static void (*dlsym_ENGINE_cleanup) (void);
+static int (*dlsym_RAND_bytes) (unsigned char *, int);
+static unsigned long (*dlsym_ERR_get_error) (void);
+#endif
+
+#ifdef WINDOWS
+typedef void * (__cdecl *__dlsym_CRYPTO_malloc) (int, const char *, int);
+typedef void (__cdecl *__dlsym_CRYPTO_free) (void *);
+typedef int (__cdecl *__dlsym_CRYPTO_num_locks) (void);
+typedef void (__cdecl *__dlsym_CRYPTO_set_locking_callback)  \
+              (void (*)(int, int, char *, int));
+typedef void (__cdecl *__dlsym_ENGINE_load_rdrand) (void);
+typedef ENGINE * (__cdecl *__dlsym_ENGINE_by_id) (const char *);
+typedef int (__cdecl *__dlsym_ENGINE_init) (ENGINE *);
+typedef int (__cdecl *__dlsym_ENGINE_set_default) (ENGINE *, unsigned int);
+typedef int (__cdecl *__dlsym_ENGINE_finish) (ENGINE *);
+typedef int (__cdecl *__dlsym_ENGINE_free) (ENGINE *);
+typedef void (__cdecl *__dlsym_ENGINE_cleanup) (void);
+typedef int (__cdecl *__dlsym_RAND_bytes) (unsigned char *, int);
+typedef unsigned long (__cdecl *__dlsym_ERR_get_error) (void);
+static __dlsym_CRYPTO_malloc dlsym_CRYPTO_malloc;
+static __dlsym_CRYPTO_free dlsym_CRYPTO_free;
+static __dlsym_CRYPTO_num_locks dlsym_CRYPTO_num_locks;
+static __dlsym_CRYPTO_set_locking_callback dlsym_CRYPTO_set_locking_callback;
+static __dlsym_ENGINE_load_rdrand dlsym_ENGINE_load_rdrand;
+static __dlsym_ENGINE_by_id dlsym_ENGINE_by_id;
+static __dlsym_ENGINE_init dlsym_ENGINE_init;
+static __dlsym_ENGINE_set_default dlsym_ENGINE_set_default;
+static __dlsym_ENGINE_finish dlsym_ENGINE_finish;
+static __dlsym_ENGINE_free dlsym_ENGINE_free;
+static __dlsym_ENGINE_cleanup dlsym_ENGINE_cleanup;
+static __dlsym_RAND_bytes dlsym_RAND_bytes;
+static __dlsym_ERR_get_error dlsym_ERR_get_error;
+#endif
+
+static ENGINE * openssl_rand_init(void);
+static void openssl_rand_clean(ENGINE *eng, int clean_locks);
+static int openssl_rand_bytes(unsigned char *buf, int num);
+
+JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_random_OpensslSecureRandom_initSR
+    (JNIEnv *env, jclass clazz)
+{
+  char msg[1000];
+#ifdef UNIX
+  void *openssl = dlopen(HADOOP_OPENSSL_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
+#endif
+
+#ifdef WINDOWS
+  HMODULE openssl = LoadLibrary(HADOOP_OPENSSL_LIBRARY);
+#endif
+
+  if (!openssl) {
+    snprintf(msg, sizeof(msg), "Cannot load %s (%s)!", HADOOP_OPENSSL_LIBRARY,  \
+        dlerror());
+    THROW(env, "java/lang/UnsatisfiedLinkError", msg);
+    return;
+  }
+
+#ifdef UNIX
+  dlerror();  // Clear any existing error
+  LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_malloc, env, openssl, "CRYPTO_malloc");
+  LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_free, env, openssl, "CRYPTO_free");
+  LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_num_locks, env, openssl, "CRYPTO_num_locks");
+  LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_set_locking_callback,  \
+                      env, openssl, "CRYPTO_set_locking_callback");
+  LOAD_DYNAMIC_SYMBOL(dlsym_CRYPTO_set_id_callback, env,  \
+                      openssl, "CRYPTO_set_id_callback");
+  LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_load_rdrand, env,  \
+                      openssl, "ENGINE_load_rdrand");
+  LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_by_id, env, openssl, "ENGINE_by_id");
+  LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_init, env, openssl, "ENGINE_init");
+  LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_set_default, env,  \
+                      openssl, "ENGINE_set_default");
+  LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_finish, env, openssl, "ENGINE_finish");
+  LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_free, env, openssl, "ENGINE_free");
+  LOAD_DYNAMIC_SYMBOL(dlsym_ENGINE_cleanup, env, openssl, "ENGINE_cleanup");
+  LOAD_DYNAMIC_SYMBOL(dlsym_RAND_bytes, env, openssl, "RAND_bytes");
+  LOAD_DYNAMIC_SYMBOL(dlsym_ERR_get_error, env, openssl, "ERR_get_error");
+#endif
+
+#ifdef WINDOWS
+  LOAD_DYNAMIC_SYMBOL(__dlsym_CRYPTO_malloc, dlsym_CRYPTO_malloc,  \
+                      env, openssl, "CRYPTO_malloc");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_CRYPTO_free, dlsym_CRYPTO_free,  \
+                      env, openssl, "CRYPTO_free");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_CRYPTO_num_locks, dlsym_CRYPTO_num_locks,  \
+                      env, openssl, "CRYPTO_num_locks");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_CRYPTO_set_locking_callback,  \
+                      dlsym_CRYPTO_set_locking_callback,  \
+                      env, openssl, "CRYPTO_set_locking_callback");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_load_rdrand, dlsym_ENGINE_load_rdrand,  \
+                      env, openssl, "ENGINE_load_rdrand");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_by_id, dlsym_ENGINE_by_id,  \
+                      env, openssl, "ENGINE_by_id");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_init, dlsym_ENGINE_init,  \
+                      env, openssl, "ENGINE_init");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_set_default, dlsym_ENGINE_set_default,  \
+                      env, openssl, "ENGINE_set_default");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_finish, dlsym_ENGINE_finish,  \
+                      env, openssl, "ENGINE_finish");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_free, dlsym_ENGINE_free,  \
+                      env, openssl, "ENGINE_free");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_ENGINE_cleanup, dlsym_ENGINE_cleanup,  \
+                      env, openssl, "ENGINE_cleanup");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_RAND_bytes, dlsym_RAND_bytes,  \
+                      env, openssl, "RAND_bytes");
+  LOAD_DYNAMIC_SYMBOL(__dlsym_ERR_get_error, dlsym_ERR_get_error,  \
+                      env, openssl, "ERR_get_error");
+#endif
+
+  openssl_rand_init();
+}
+
+JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_crypto_random_OpensslSecureRandom_nextRandBytes___3B
+    (JNIEnv *env, jobject object, jbyteArray bytes)
+{
+  if (NULL == bytes) {
+    THROW(env, "java/lang/NullPointerException", "Buffer cannot be null.");
+    return JNI_FALSE;
+  }
+  jbyte *b = (*env)->GetByteArrayElements(env, bytes, NULL);
+  if (NULL == b) {
+    THROW(env, "java/lang/InternalError", "Cannot get bytes array.");
+    return JNI_FALSE;
+  }
+  int b_len = (*env)->GetArrayLength(env, bytes);
+  int ret = openssl_rand_bytes((unsigned char *)b, b_len);
+  (*env)->ReleaseByteArrayElements(env, bytes, b, 0);
+  
+  if (1 != ret) {
+    return JNI_FALSE;
+  }
+  return JNI_TRUE;
+}
+
+/**
+ * To ensure thread safety for random number generators, we need to call 
+ * CRYPTO_set_locking_callback.
+ * http://wiki.openssl.org/index.php/Random_Numbers
+ * Example: crypto/threads/mttest.c
+ */
+
+#ifdef WINDOWS
+static void windows_locking_callback(int mode, int type, char *file, int line);
+static HANDLE *lock_cs;
+
+static void locks_setup(void)
+{
+  int i;
+  lock_cs = dlsym_CRYPTO_malloc(dlsym_CRYPTO_num_locks() * sizeof(HANDLE),  \
+      __FILE__, __LINE__);
+
+  for (i = 0; i < dlsym_CRYPTO_num_locks(); i++) {
+    lock_cs[i] = CreateMutex(NULL, FALSE, NULL);
+  }
+  dlsym_CRYPTO_set_locking_callback((void (*)(int, int, char *, int))  \
+      windows_locking_callback);
+  /* id callback defined */
+}
+
+static void locks_cleanup(void)
+{
+  int i;
+  dlsym_CRYPTO_set_locking_callback(NULL);
+
+  for (i = 0; i < dlsym_CRYPTO_num_locks(); i++) {
+    CloseHandle(lock_cs[i]);
+  }
+  dlsym_CRYPTO_free(lock_cs);
+}
+
+static void windows_locking_callback(int mode, int type, char *file, int line)
+{
+  UNUSED(file), UNUSED(line);
+  
+  if (mode & CRYPTO_LOCK) {
+    WaitForSingleObject(lock_cs[type], INFINITE);
+  } else {
+    ReleaseMutex(lock_cs[type]);
+  }
+}
+#endif /* WINDOWS */
+
+#ifdef UNIX
+static void pthreads_locking_callback(int mode, int type, char *file, int line);
+static unsigned long pthreads_thread_id(void);
+static pthread_mutex_t *lock_cs;
+
+static void locks_setup(void)
+{
+  int i;
+  lock_cs = dlsym_CRYPTO_malloc(dlsym_CRYPTO_num_locks() *  \
+      sizeof(pthread_mutex_t), __FILE__, __LINE__);
+
+  for (i = 0; i < dlsym_CRYPTO_num_locks(); i++) {
+    pthread_mutex_init(&(lock_cs[i]), NULL);
+  }
+  
+  dlsym_CRYPTO_set_id_callback((unsigned long (*)())pthreads_thread_id);
+  dlsym_CRYPTO_set_locking_callback((void (*)())pthreads_locking_callback);
+}
+
+static void locks_cleanup(void)
+{
+  int i;
+  dlsym_CRYPTO_set_locking_callback(NULL);
+  
+  for (i = 0; i < dlsym_CRYPTO_num_locks(); i++) {
+    pthread_mutex_destroy(&(lock_cs[i]));
+  }
+  
+  dlsym_CRYPTO_free(lock_cs);
+}
+
+static void pthreads_locking_callback(int mode, int type, char *file, int line)
+{
+  UNUSED(file), UNUSED(line);
+  
+  if (mode & CRYPTO_LOCK) {
+    pthread_mutex_lock(&(lock_cs[type]));
+  } else {
+    pthread_mutex_unlock(&(lock_cs[type]));
+  }
+}
+
+static unsigned long pthreads_thread_id(void)
+{
+  return (unsigned long)syscall(SYS_gettid);
+}
+
+#endif /* UNIX */
+
+/**
+ * If using an Intel chipset with RDRAND, the high-performance hardware
+ * random number generator will be used.
+ */
+static ENGINE * openssl_rand_init(void)
+{
+  locks_setup();
+  
+  dlsym_ENGINE_load_rdrand();
+  ENGINE *eng = dlsym_ENGINE_by_id("rdrand");
+  
+  int ret = -1;
+  do {
+    if (NULL == eng) {
+      break;
+    }
+    
+    int rc = dlsym_ENGINE_init(eng);
+    if (0 == rc) {
+      break;
+    }
+    
+    rc = dlsym_ENGINE_set_default(eng, ENGINE_METHOD_RAND);
+    if (0 == rc) {
+      break;
+    }
+  
+    ret = 0;
+  } while(0);
+  
+  if (ret == -1) {
+    openssl_rand_clean(eng, 0);
+  }
+  
+  return eng;
+}
+
+static void openssl_rand_clean(ENGINE *eng, int clean_locks)
+{
+  if (NULL != eng) {
+    dlsym_ENGINE_finish(eng);
+    dlsym_ENGINE_free(eng);
+  }
+    
+  dlsym_ENGINE_cleanup();
+  if (clean_locks) {
+    locks_cleanup();
+  }
+}
+
+static int openssl_rand_bytes(unsigned char *buf, int num)
+{
+  return dlsym_RAND_bytes(buf, num);
+}
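
On the Java side this native code backs the OpensslSecureRandom class
(HADOOP-10734). A minimal usage sketch, assuming that class follows the usual
java.util.Random nextBytes contract and that libhadoop and libcrypto are
loadable at runtime:

    import org.apache.hadoop.crypto.random.OpensslSecureRandom;

    public class SecureRandomSketch {
      public static void main(String[] args) {
        OpensslSecureRandom random = new OpensslSecureRandom();
        byte[] iv = new byte[16];
        // Filled via RAND_bytes, backed by rdrand when that engine is available.
        random.nextBytes(iv);
      }
    }
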
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/org_apache_hadoop_crypto_random.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/org_apache_hadoop_crypto_random.h
new file mode 100644
index 0000000..1200718
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/crypto/random/org_apache_hadoop_crypto_random.h
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+#ifndef ORG_APACHE_HADOOP_CRYPTO_RANDOM_H
+#define ORG_APACHE_HADOOP_CRYPTO_RANDOM_H
+
+#include "org_apache_hadoop.h"
+
+#ifdef UNIX
+#include <dlfcn.h>
+#include "config.h"
+#endif
+
+#ifdef WINDOWS
+#include "winutils.h"
+#endif
+
+#define UNUSED(x) ((void)(x))
+
+#include <openssl/crypto.h>
+#include <openssl/engine.h>
+#include <openssl/rand.h>
+#include <openssl/err.h>
+
+#endif //ORG_APACHE_HADOOP_CRYPTO_RANDOM_H
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c
index d03050c..3625112 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c
@@ -39,6 +39,16 @@
 #endif
 }
 
+JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_buildSupportsOpenssl
+  (JNIEnv *env, jclass clazz)
+{
+#ifdef HADOOP_OPENSSL_LIBRARY
+  return JNI_TRUE;
+#else
+  return JNI_FALSE;
+#endif
+}
+
 JNIEXPORT jstring JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_getLibraryName
   (JNIEnv *env, jclass clazz)
 {
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index eeb2bb2..3cc7545 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1445,6 +1445,74 @@
     true.
   </description>
 </property>
+
+<property>
+  <name>hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE</name>
+  <value></value>
+  <description>
+    The prefix for a given crypto codec; it contains a comma-separated list
+    of implementation classes for that codec (e.g. EXAMPLECIPHERSUITE).
+    The first implementation will be used if available; the others are fallbacks.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.crypto.codec.classes.aes.ctr.nopadding</name>
+  <value>org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,org.apache.hadoop.crypto.JceAesCtrCryptoCodec</value>
+  <description>
+    Comma-separated list of crypto codec implementations for AES/CTR/NoPadding. 
+    The first implementation will be used if available; the others are fallbacks.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.crypto.cipher.suite</name>
+  <value>AES/CTR/NoPadding</value>
+  <description>
+    Cipher suite for crypto codec.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.crypto.jce.provider</name>
+  <value></value>
+  <description>
+    The JCE provider name used in CryptoCodec. 
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.crypto.buffer.size</name>
+  <value>8192</value>
+  <description>
+    The buffer size used by CryptoInputStream and CryptoOutputStream. 
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.java.secure.random.algorithm</name>
+  <value>SHA1PRNG</value>
+  <description>
+    The java secure random algorithm. 
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.secure.random.impl</name>
+  <value></value>
+  <description>
+    Implementation of secure random. 
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.random.device.file.path</name>
+  <value>/dev/urandom</value>
+  <description>
+    OS security random device file path.
+  </description>
+</property>
+
 <property>
   <name>fs.har.impl.disable.cache</name>
   <value>true</value>
@@ -1483,4 +1551,5 @@
     key will be dropped. Default = 12hrs
   </description>
 </property>
+
 </configuration>
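
These defaults can be overridden in core-site.xml or programmatically. A minimal
sketch using the property names introduced above; the 64 KB buffer size is just
an example value:

    import org.apache.hadoop.conf.Configuration;

    public class CryptoConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Prefer the OpenSSL-backed codec and fall back to the JCE codec,
        // mirroring the shipped default for AES/CTR/NoPadding.
        conf.set("hadoop.security.crypto.codec.classes.aes.ctr.nopadding",
            "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,"
            + "org.apache.hadoop.crypto.JceAesCtrCryptoCodec");
        conf.setInt("hadoop.security.crypto.buffer.size", 64 * 1024);
        System.out.println(conf.get("hadoop.security.crypto.cipher.suite"));
      }
    }
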
diff --git a/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
index 48e0b21..53e42cb 100644
--- a/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
+++ b/hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
@@ -168,15 +168,22 @@
    Copy files from source to destination. This command allows multiple sources
    as well in which case the destination must be a directory.
 
+   'raw.*' namespace extended attributes are preserved if (1) the source and
+   destination filesystems support them (HDFS only), and (2) all source and
+   destination pathnames are in the /.reserved/raw hierarchy. Determination of
+   whether raw.* namespace xattrs are preserved is independent of the
+   -p (preserve) flag.
+
     Options:
 
       * The -f option will overwrite the destination if it already exists.
       
-      * The -p option will preserve file attributes [topx] (timestamps, 
+      * The -p option will preserve file attributes [topx] (timestamps,
         ownership, permission, ACL, XAttr). If -p is specified with no <arg>,
         then preserves timestamps, ownership, permission. If -pa is specified,
         then preserves permission also because ACL is a super-set of
-        permission.
+        permission. Determination of whether raw namespace extended attributes
+        are preserved is independent of the -p flag.
 
    Example:
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
new file mode 100644
index 0000000..f5acc73
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/CryptoStreamsTestBase.java
@@ -0,0 +1,721 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.EnumSet;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.ByteBufferReadable;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.ReadOption;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.Syncable;
+import org.apache.hadoop.io.ByteBufferPool;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.RandomDatum;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public abstract class CryptoStreamsTestBase {
+  protected static final Log LOG = LogFactory.getLog(
+      CryptoStreamsTestBase.class);
+
+  protected static CryptoCodec codec;
+  private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+    0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16};
+  private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+    0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
+  
+  protected static final int count = 10000;
+  protected static int defaultBufferSize = 8192;
+  protected static int smallBufferSize = 1024;
+  private byte[] data;
+  private int dataLen;
+  
+  @Before
+  public void setUp() throws IOException {
+    // Generate data
+    final int seed = new Random().nextInt();
+    final DataOutputBuffer dataBuf = new DataOutputBuffer();
+    final RandomDatum.Generator generator = new RandomDatum.Generator(seed);
+    for(int i = 0; i < count; ++i) {
+      generator.next();
+      final RandomDatum key = generator.getKey();
+      final RandomDatum value = generator.getValue();
+      
+      key.write(dataBuf);
+      value.write(dataBuf);
+    }
+    LOG.info("Generated " + count + " records");
+    data = dataBuf.getData();
+    dataLen = dataBuf.getLength();
+  }
+  
+  protected void writeData(OutputStream out) throws Exception {
+    out.write(data, 0, dataLen);
+    out.close();
+  }
+  
+  protected int getDataLen() {
+    return dataLen;
+  }
+  
+  private int readAll(InputStream in, byte[] b, int off, int len) 
+      throws IOException {
+    int n = 0;
+    int total = 0;
+    while (n != -1) {
+      total += n;
+      if (total >= len) {
+        break;
+      }
+      n = in.read(b, off + total, len - total);
+    }
+    
+    return total;
+  }
+  
+  protected OutputStream getOutputStream(int bufferSize) throws IOException {
+    return getOutputStream(bufferSize, key, iv);
+  }
+  
+  protected abstract OutputStream getOutputStream(int bufferSize, byte[] key, 
+      byte[] iv) throws IOException;
+  
+  protected InputStream getInputStream(int bufferSize) throws IOException {
+    return getInputStream(bufferSize, key, iv);
+  }
+  
+  protected abstract InputStream getInputStream(int bufferSize, byte[] key, 
+      byte[] iv) throws IOException;
+  
+  /** Test crypto reading with different buffer size. */
+  @Test(timeout=120000)
+  public void testRead() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    // Default buffer size
+    InputStream in = getInputStream(defaultBufferSize);
+    readCheck(in);
+    in.close();
+    
+    // Small buffer size
+    in = getInputStream(smallBufferSize);
+    readCheck(in);
+    in.close();
+  }
+  
+  private void readCheck(InputStream in) throws Exception {
+    byte[] result = new byte[dataLen];
+    int n = readAll(in, result, 0, dataLen);
+    
+    Assert.assertEquals(dataLen, n);
+    byte[] expectedData = new byte[n];
+    System.arraycopy(data, 0, expectedData, 0, n);
+    Assert.assertArrayEquals(result, expectedData);
+    
+    // EOF
+    n = in.read(result, 0, dataLen);
+    Assert.assertEquals(n, -1);
+    in.close();
+  }
+  
+  /** Test crypto writing with different buffer size. */
+  @Test(timeout = 120000)
+  public void testWrite() throws Exception {
+    // Default buffer size
+    writeCheck(defaultBufferSize);
+
+    // Small buffer size
+    writeCheck(smallBufferSize);
+  }
+
+  private void writeCheck(int bufferSize) throws Exception {
+    OutputStream out = getOutputStream(bufferSize);
+    writeData(out);
+
+    if (out instanceof FSDataOutputStream) {
+      Assert.assertEquals(((FSDataOutputStream) out).getPos(), getDataLen());
+    }
+  }
+
+  /** Test crypto with different IV. */
+  @Test(timeout=120000)
+  public void testCryptoIV() throws Exception {
+    byte[] iv1 = iv.clone();
+    
+    // Counter base: Long.MAX_VALUE
+    setCounterBaseForIV(iv1, Long.MAX_VALUE);
+    cryptoCheck(iv1);
+    
+    // Counter base: Long.MAX_VALUE - 1
+    setCounterBaseForIV(iv1, Long.MAX_VALUE - 1);
+    cryptoCheck(iv1);
+    
+    // Counter base: Integer.MAX_VALUE
+    setCounterBaseForIV(iv1, Integer.MAX_VALUE);
+    cryptoCheck(iv1);
+    
+    // Counter base: 0
+    setCounterBaseForIV(iv1, 0);
+    cryptoCheck(iv1);
+    
+    // Counter base: -1
+    setCounterBaseForIV(iv1, -1);
+    cryptoCheck(iv1);
+  }
+  
+  private void cryptoCheck(byte[] iv) throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize, key, iv);
+    writeData(out);
+    
+    InputStream in = getInputStream(defaultBufferSize, key, iv);
+    readCheck(in);
+    in.close();
+  }
+  
+  private void setCounterBaseForIV(byte[] iv, long counterBase) {
+    ByteBuffer buf = ByteBuffer.wrap(iv);
+    buf.order(ByteOrder.BIG_ENDIAN);
+    buf.putLong(iv.length - 8, counterBase);
+  }
+  
+  /**
+   * Test hflush/hsync of crypto output stream, and with different buffer size.
+   */
+  @Test(timeout=120000)
+  public void testSyncable() throws IOException {
+    syncableCheck();
+  }
+  
+  private void syncableCheck() throws IOException {
+    OutputStream out = getOutputStream(smallBufferSize);
+    try {
+      int bytesWritten = dataLen / 3;
+      out.write(data, 0, bytesWritten);
+      ((Syncable) out).hflush();
+      
+      InputStream in = getInputStream(defaultBufferSize);
+      verify(in, bytesWritten, data);
+      in.close();
+      
+      out.write(data, bytesWritten, dataLen - bytesWritten);
+      ((Syncable) out).hsync();
+      
+      in = getInputStream(defaultBufferSize);
+      verify(in, dataLen, data);
+      in.close();
+    } finally {
+      out.close();
+    }
+  }
+  
+  private void verify(InputStream in, int bytesToVerify, 
+      byte[] expectedBytes) throws IOException {
+    final byte[] readBuf = new byte[bytesToVerify];
+    readAll(in, readBuf, 0, bytesToVerify);
+    for (int i = 0; i < bytesToVerify; i++) {
+      Assert.assertEquals(expectedBytes[i], readBuf[i]);
+    }
+  }
+  
+  private int readAll(InputStream in, long pos, byte[] b, int off, int len) 
+      throws IOException {
+    int n = 0;
+    int total = 0;
+    while (n != -1) {
+      total += n;
+      if (total >= len) {
+        break;
+      }
+      n = ((PositionedReadable) in).read(pos + total, b, off + total, 
+          len - total);
+    }
+    
+    return total;
+  }
+  
+  /** Test positioned read. */
+  @Test(timeout=120000)
+  public void testPositionedRead() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    InputStream in = getInputStream(defaultBufferSize);
+    // Pos: 1/3 dataLen
+    positionedReadCheck(in , dataLen / 3);
+
+    // Pos: 1/2 dataLen
+    positionedReadCheck(in, dataLen / 2);
+    in.close();
+  }
+  
+  private void positionedReadCheck(InputStream in, int pos) throws Exception {
+    byte[] result = new byte[dataLen];
+    int n = readAll(in, pos, result, 0, dataLen);
+    
+    Assert.assertEquals(dataLen, n + pos);
+    byte[] readData = new byte[n];
+    System.arraycopy(result, 0, readData, 0, n);
+    byte[] expectedData = new byte[n];
+    System.arraycopy(data, pos, expectedData, 0, n);
+    Assert.assertArrayEquals(readData, expectedData);
+  }
+  
+  /** Test read fully */
+  @Test(timeout=120000)
+  public void testReadFully() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    InputStream in = getInputStream(defaultBufferSize);
+    final int len1 = dataLen / 4;
+    // Read len1 bytes
+    byte[] readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    byte[] expectedData = new byte[len1];
+    System.arraycopy(data, 0, expectedData, 0, len1);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    // Pos: 1/3 dataLen
+    readFullyCheck(in, dataLen / 3);
+    
+    // Read len1 bytes
+    readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    expectedData = new byte[len1];
+    System.arraycopy(data, len1, expectedData, 0, len1);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    // Pos: 1/2 dataLen
+    readFullyCheck(in, dataLen / 2);
+    
+    // Read len1 bytes
+    readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    expectedData = new byte[len1];
+    System.arraycopy(data, 2 * len1, expectedData, 0, len1);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    in.close();
+  }
+  
+  private void readFullyCheck(InputStream in, int pos) throws Exception {
+    byte[] result = new byte[dataLen - pos];
+    ((PositionedReadable) in).readFully(pos, result);
+    
+    byte[] expectedData = new byte[dataLen - pos];
+    System.arraycopy(data, pos, expectedData, 0, dataLen - pos);
+    Assert.assertArrayEquals(result, expectedData);
+    
+    result = new byte[dataLen]; // Exceeds maximum length 
+    try {
+      ((PositionedReadable) in).readFully(pos, result);
+      Assert.fail("Read fully exceeds maximum length should fail.");
+    } catch (IOException e) {
+    }
+  }
+  
+  /** Test seek to different position. */
+  @Test(timeout=120000)
+  public void testSeek() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    InputStream in = getInputStream(defaultBufferSize);
+    // Pos: 1/3 dataLen
+    seekCheck(in, dataLen / 3);
+    
+    // Pos: 0
+    seekCheck(in, 0);
+    
+    // Pos: 1/2 dataLen
+    seekCheck(in, dataLen / 2);
+    
+    final long pos = ((Seekable) in).getPos();
+    
+    // Pos: -3
+    try {
+      seekCheck(in, -3);
+      Assert.fail("Seek to negative offset should fail.");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("Cannot seek to negative " +
+          "offset", e);
+    }
+    Assert.assertEquals(pos, ((Seekable) in).getPos());
+    
+    // Pos: dataLen + 3
+    try {
+      seekCheck(in, dataLen + 3);
+      Assert.fail("Seek after EOF should fail.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Cannot seek after EOF", e);
+    }
+    Assert.assertEquals(pos, ((Seekable) in).getPos());
+    
+    in.close();
+  }
+  
+  private void seekCheck(InputStream in, int pos) throws Exception {
+    byte[] result = new byte[dataLen];
+    ((Seekable) in).seek(pos);
+    int n = readAll(in, result, 0, dataLen);
+    
+    Assert.assertEquals(dataLen, n + pos);
+    byte[] readData = new byte[n];
+    System.arraycopy(result, 0, readData, 0, n);
+    byte[] expectedData = new byte[n];
+    System.arraycopy(data, pos, expectedData, 0, n);
+    Assert.assertArrayEquals(readData, expectedData);
+  }
+  
+  /** Test get position. */
+  @Test(timeout=120000)
+  public void testGetPos() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    // Default buffer size
+    InputStream in = getInputStream(defaultBufferSize);
+    byte[] result = new byte[dataLen];
+    int n1 = readAll(in, result, 0, dataLen / 3);
+    Assert.assertEquals(n1, ((Seekable) in).getPos());
+    
+    int n2 = readAll(in, result, n1, dataLen - n1);
+    Assert.assertEquals(n1 + n2, ((Seekable) in).getPos());
+    in.close();
+  }
+  
+  @Test(timeout=120000)
+  public void testAvailable() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    // Default buffer size
+    InputStream in = getInputStream(defaultBufferSize);
+    byte[] result = new byte[dataLen];
+    int n1 = readAll(in, result, 0, dataLen / 3);
+    Assert.assertEquals(in.available(), dataLen - n1);
+    
+    int n2 = readAll(in, result, n1, dataLen - n1);
+    Assert.assertEquals(in.available(), dataLen - n1 - n2);
+    in.close();
+  }
+  
+  /** Test skip. */
+  @Test(timeout=120000)
+  public void testSkip() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+        
+    // Default buffer size
+    InputStream in = getInputStream(defaultBufferSize);
+    byte[] result = new byte[dataLen];
+    int n1 = readAll(in, result, 0, dataLen / 3);
+    Assert.assertEquals(n1, ((Seekable) in).getPos());
+    
+    long skipped = in.skip(dataLen / 3);
+    int n2 = readAll(in, result, 0, dataLen);
+    
+    Assert.assertEquals(dataLen, n1 + skipped + n2);
+    byte[] readData = new byte[n2];
+    System.arraycopy(result, 0, readData, 0, n2);
+    byte[] expectedData = new byte[n2];
+    System.arraycopy(data, dataLen - n2, expectedData, 0, n2);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    try {
+      skipped = in.skip(-3);
+      Assert.fail("Skip Negative length should fail.");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("Negative skip length", e);
+    }
+    
+    // Skip after EOF
+    skipped = in.skip(3);
+    Assert.assertEquals(skipped, 0);
+    
+    in.close();
+  }
+  
+  private void byteBufferReadCheck(InputStream in, ByteBuffer buf, 
+      int bufPos) throws Exception {
+    buf.position(bufPos);
+    int n = ((ByteBufferReadable) in).read(buf);
+    byte[] readData = new byte[n];
+    buf.rewind();
+    buf.position(bufPos);
+    buf.get(readData);
+    byte[] expectedData = new byte[n];
+    System.arraycopy(data, 0, expectedData, 0, n);
+    Assert.assertArrayEquals(readData, expectedData);
+  }
+  
+  /** Test byte buffer read with different buffer size. */
+  @Test(timeout=120000)
+  public void testByteBufferRead() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    // Default buffer size, initial buffer position is 0
+    InputStream in = getInputStream(defaultBufferSize);
+    ByteBuffer buf = ByteBuffer.allocate(dataLen + 100);
+    byteBufferReadCheck(in, buf, 0);
+    in.close();
+    
+    // Default buffer size, initial buffer position is not 0
+    in = getInputStream(defaultBufferSize);
+    buf.clear();
+    byteBufferReadCheck(in, buf, 11);
+    in.close();
+    
+    // Small buffer size, initial buffer position is 0
+    in = getInputStream(smallBufferSize);
+    buf.clear();
+    byteBufferReadCheck(in, buf, 0);
+    in.close();
+    
+    // Small buffer size, initial buffer position is not 0
+    in = getInputStream(smallBufferSize);
+    buf.clear();
+    byteBufferReadCheck(in, buf, 11);
+    in.close();
+    
+    // Direct buffer, default buffer size, initial buffer position is 0
+    in = getInputStream(defaultBufferSize);
+    buf = ByteBuffer.allocateDirect(dataLen + 100);
+    byteBufferReadCheck(in, buf, 0);
+    in.close();
+    
+    // Direct buffer, default buffer size, initial buffer position is not 0
+    in = getInputStream(defaultBufferSize);
+    buf.clear();
+    byteBufferReadCheck(in, buf, 11);
+    in.close();
+    
+    // Direct buffer, small buffer size, initial buffer position is 0
+    in = getInputStream(smallBufferSize);
+    buf.clear();
+    byteBufferReadCheck(in, buf, 0);
+    in.close();
+    
+    // Direct buffer, small buffer size, initial buffer position is not 0
+    in = getInputStream(smallBufferSize);
+    buf.clear();
+    byteBufferReadCheck(in, buf, 11);
+    in.close();
+  }
+  
+  @Test(timeout=120000)
+  public void testCombinedOp() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    final int len1 = dataLen / 8;
+    final int len2 = dataLen / 10;
+    
+    InputStream in = getInputStream(defaultBufferSize);
+    // Read len1 data.
+    byte[] readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    byte[] expectedData = new byte[len1];
+    System.arraycopy(data, 0, expectedData, 0, len1);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    long pos = ((Seekable) in).getPos();
+    Assert.assertEquals(len1, pos);
+    
+    // Seek forward len2
+    ((Seekable) in).seek(pos + len2);
+    // Skip forward len2
+    long n = in.skip(len2);
+    Assert.assertEquals(len2, n);
+    
+    // Pos: 1/4 dataLen
+    positionedReadCheck(in, dataLen / 4);
+    
+    // Pos should be len1 + len2 + len2
+    pos = ((Seekable) in).getPos();
+    Assert.assertEquals(len1 + len2 + len2, pos);
+    
+    // Read forward len1
+    ByteBuffer buf = ByteBuffer.allocate(len1);
+    int nRead = ((ByteBufferReadable) in).read(buf);
+    readData = new byte[nRead];
+    buf.rewind();
+    buf.get(readData);
+    expectedData = new byte[nRead];
+    System.arraycopy(data, (int)pos, expectedData, 0, nRead);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    // Pos should be len1 + 2 * len2 + nRead
+    pos = ((Seekable) in).getPos();
+    Assert.assertEquals(len1 + 2 * len2 + nRead, pos);
+    
+    // Pos: 1/3 dataLen
+    positionedReadCheck(in, dataLen / 3);
+    
+    // Read forward len1
+    readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    expectedData = new byte[len1];
+    System.arraycopy(data, (int)pos, expectedData, 0, len1);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    // Pos should be 2 * len1 + 2 * len2 + nRead
+    pos = ((Seekable) in).getPos();
+    Assert.assertEquals(2 * len1 + 2 * len2 + nRead, pos);
+    
+    // Read forward len1
+    buf = ByteBuffer.allocate(len1);
+    nRead = ((ByteBufferReadable) in).read(buf);
+    readData = new byte[nRead];
+    buf.rewind();
+    buf.get(readData);
+    expectedData = new byte[nRead];
+    System.arraycopy(data, (int)pos, expectedData, 0, nRead);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    // ByteBuffer read after EOF
+    ((Seekable) in).seek(dataLen);
+    buf.clear();
+    n = ((ByteBufferReadable) in).read(buf);
+    Assert.assertEquals(n, -1);
+    
+    in.close();
+  }
+  
+  @Test(timeout=120000)
+  public void testSeekToNewSource() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    InputStream in = getInputStream(defaultBufferSize);
+    
+    final int len1 = dataLen / 8;
+    byte[] readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    
+    // Pos: 1/3 dataLen
+    seekToNewSourceCheck(in, dataLen / 3);
+    
+    // Pos: 0
+    seekToNewSourceCheck(in, 0);
+    
+    // Pos: 1/2 dataLen
+    seekToNewSourceCheck(in, dataLen / 2);
+    
+    // Pos: -3
+    try {
+      seekToNewSourceCheck(in, -3);
+      Assert.fail("Seek to negative offset should fail.");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains("Cannot seek to negative " +
+          "offset", e);
+    }
+    
+    // Pos: dataLen + 3
+    try {
+      seekToNewSourceCheck(in, dataLen + 3);
+      Assert.fail("Seek after EOF should fail.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Attempted to read past " +
+          "end of file", e);
+    }
+    
+    in.close();
+  }
+  
+  private void seekToNewSourceCheck(InputStream in, int targetPos) 
+      throws Exception {
+    byte[] result = new byte[dataLen];
+    ((Seekable) in).seekToNewSource(targetPos);
+    int n = readAll(in, result, 0, dataLen);
+    
+    Assert.assertEquals(dataLen, n + targetPos);
+    byte[] readData = new byte[n];
+    System.arraycopy(result, 0, readData, 0, n);
+    byte[] expectedData = new byte[n];
+    System.arraycopy(data, targetPos, expectedData, 0, n);
+    Assert.assertArrayEquals(readData, expectedData);
+  }
+  
+  private ByteBufferPool getBufferPool() {
+    return new ByteBufferPool() {
+      @Override
+      public ByteBuffer getBuffer(boolean direct, int length) {
+        return ByteBuffer.allocateDirect(length);
+      }
+      
+      @Override
+      public void putBuffer(ByteBuffer buffer) {
+      }
+    };
+  }
+  
+  @Test(timeout=120000)
+  public void testHasEnhancedByteBufferAccess() throws Exception {
+    OutputStream out = getOutputStream(defaultBufferSize);
+    writeData(out);
+    
+    InputStream in = getInputStream(defaultBufferSize);
+    final int len1 = dataLen / 8;
+    // ByteBuffer size is len1
+    ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).read(
+        getBufferPool(), len1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
+    int n1 = buffer.remaining();
+    byte[] readData = new byte[n1];
+    buffer.get(readData);
+    byte[] expectedData = new byte[n1];
+    System.arraycopy(data, 0, expectedData, 0, n1);
+    Assert.assertArrayEquals(readData, expectedData);
+    ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
+    
+    // Read len1 bytes
+    readData = new byte[len1];
+    readAll(in, readData, 0, len1);
+    expectedData = new byte[len1];
+    System.arraycopy(data, n1, expectedData, 0, len1);
+    Assert.assertArrayEquals(readData, expectedData);
+    
+    // ByteBuffer size is len1
+    buffer = ((HasEnhancedByteBufferAccess) in).read(
+        getBufferPool(), len1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
+    int n2 = buffer.remaining();
+    readData = new byte[n2];
+    buffer.get(readData);
+    expectedData = new byte[n2];
+    System.arraycopy(data, n1 + len1, expectedData, 0, n2);
+    Assert.assertArrayEquals(readData, expectedData);
+    ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
+    
+    in.close();
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
new file mode 100644
index 0000000..49b5056
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoCodec.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.security.GeneralSecurityException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.RandomDatum;
+import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Test;
+
+public class TestCryptoCodec {
+  private static final Log LOG = LogFactory.getLog(TestCryptoCodec.class);
+  private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+    0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16};
+  private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+    0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
+  private static final int bufferSize = 4096;
+  
+  private Configuration conf = new Configuration();
+  private int count = 10000;
+  private int seed = new Random().nextInt();
+  
+  @Test(timeout=120000)
+  public void testJceAesCtrCryptoCodec() throws Exception {
+    cryptoCodecTest(conf, seed, 0, 
+        "org.apache.hadoop.crypto.JceAesCtrCryptoCodec");
+    cryptoCodecTest(conf, seed, count, 
+        "org.apache.hadoop.crypto.JceAesCtrCryptoCodec");
+  }
+  
+  @Test(timeout=1200000)
+  public void testOpensslAesCtrCryptoCodec() throws Exception {
+    Assume.assumeTrue(NativeCodeLoader.buildSupportsOpenssl());
+    Assert.assertEquals(null, OpensslCipher.getLoadingFailureReason());
+    cryptoCodecTest(conf, seed, 0, 
+        "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec");
+    cryptoCodecTest(conf, seed, count, 
+        "org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec");
+  }
+  
+  private void cryptoCodecTest(Configuration conf, int seed, int count, 
+      String codecClass) throws IOException, GeneralSecurityException {
+    CryptoCodec codec = null;
+    try {
+      codec = (CryptoCodec)ReflectionUtils.newInstance(
+          conf.getClassByName(codecClass), conf);
+    } catch (ClassNotFoundException cnfe) {
+      throw new IOException("Illegal crypto codec!");
+    }
+    LOG.info("Created a Codec object of type: " + codecClass);
+    
+    // Generate data
+    DataOutputBuffer data = new DataOutputBuffer();
+    RandomDatum.Generator generator = new RandomDatum.Generator(seed);
+    for(int i = 0; i < count; ++i) {
+      generator.next();
+      RandomDatum key = generator.getKey();
+      RandomDatum value = generator.getValue();
+      
+      key.write(data);
+      value.write(data);
+    }
+    LOG.info("Generated " + count + " records");
+    
+    // Encrypt data
+    DataOutputBuffer encryptedDataBuffer = new DataOutputBuffer();
+    CryptoOutputStream out = new CryptoOutputStream(encryptedDataBuffer, 
+        codec, bufferSize, key, iv);
+    out.write(data.getData(), 0, data.getLength());
+    out.flush();
+    out.close();
+    LOG.info("Finished encrypting data");
+    
+    // Decrypt data
+    DataInputBuffer decryptedDataBuffer = new DataInputBuffer();
+    decryptedDataBuffer.reset(encryptedDataBuffer.getData(), 0, 
+        encryptedDataBuffer.getLength());
+    CryptoInputStream in = new CryptoInputStream(decryptedDataBuffer, 
+        codec, bufferSize, key, iv);
+    DataInputStream dataIn = new DataInputStream(new BufferedInputStream(in));
+    
+    // Check
+    DataInputBuffer originalData = new DataInputBuffer();
+    originalData.reset(data.getData(), 0, data.getLength());
+    DataInputStream originalIn = new DataInputStream(
+        new BufferedInputStream(originalData));
+    
+    for(int i=0; i < count; ++i) {
+      RandomDatum k1 = new RandomDatum();
+      RandomDatum v1 = new RandomDatum();
+      k1.readFields(originalIn);
+      v1.readFields(originalIn);
+      
+      RandomDatum k2 = new RandomDatum();
+      RandomDatum v2 = new RandomDatum();
+      k2.readFields(dataIn);
+      v2.readFields(dataIn);
+      assertTrue("original and encrypted-then-decrypted-output not equal",
+                 k1.equals(k2) && v1.equals(v2));
+      
+      // original and encrypted-then-decrypted-output have the same hashCode
+      Map<RandomDatum, String> m = new HashMap<RandomDatum, String>();
+      m.put(k1, k1.toString());
+      m.put(v1, v1.toString());
+      String result = m.get(k2);
+      assertEquals("k1 and k2 hashcode not equal", result, k1.toString());
+      result = m.get(v2);
+      assertEquals("v1 and v2 hashcode not equal", result, v1.toString());
+    }
+
+    // Decrypt data byte-at-a-time
+    originalData.reset(data.getData(), 0, data.getLength());
+    decryptedDataBuffer.reset(encryptedDataBuffer.getData(), 0, 
+        encryptedDataBuffer.getLength());
+    in = new CryptoInputStream(decryptedDataBuffer, 
+        codec, bufferSize, key, iv);
+
+    // Check
+    originalIn = new DataInputStream(new BufferedInputStream(originalData));
+    int expected;
+    do {
+      expected = originalIn.read();
+      assertEquals("Decrypted stream read by byte does not match",
+        expected, in.read());
+    } while (expected != -1);
+
+    LOG.info("SUCCESS! Completed checking " + count + " records");
+    
+    // Check secure random generator
+    testSecureRandom(codec);
+  }
+  
+  /** Test secure random generator */
+  private void testSecureRandom(CryptoCodec codec) {
+    // len = 16
+    checkSecureRandom(codec, 16);
+    // len = 32
+    checkSecureRandom(codec, 32);
+    // len = 128
+    checkSecureRandom(codec, 128);
+  }
+  
+  private void checkSecureRandom(CryptoCodec codec, int len) {
+    byte[] rand = new byte[len];
+    byte[] rand1 = new byte[len];
+    codec.generateSecureRandom(rand);
+    codec.generateSecureRandom(rand1);
+    
+    Assert.assertEquals(len, rand.length);
+    Assert.assertEquals(len, rand1.length);
+    Assert.assertFalse(Arrays.equals(rand, rand1));
+  }
+}
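
For reference when reading the test above, the following is a minimal, self-contained
sketch of the encrypt/decrypt round trip it exercises. It uses only calls that appear
in this patch (CryptoCodec.getInstance, generateSecureRandom, and the
CryptoOutputStream/CryptoInputStream constructors); the buffer size, key, and IV
values are arbitrary illustrations, not requirements of the codec.

  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.io.InputStream;
  import java.io.OutputStream;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.crypto.CryptoCodec;
  import org.apache.hadoop.crypto.CryptoInputStream;
  import org.apache.hadoop.crypto.CryptoOutputStream;

  public class CryptoRoundTripSketch {
    public static void main(String[] args) throws Exception {
      CryptoCodec codec = CryptoCodec.getInstance(new Configuration());

      // Illustrative 16-byte AES key and 16-byte CTR IV.
      byte[] key = new byte[16];
      byte[] iv = new byte[16];
      codec.generateSecureRandom(key);
      codec.generateSecureRandom(iv);

      byte[] plain = "hello, crypto streams".getBytes("UTF-8");

      // Encrypt: wrap any OutputStream in a CryptoOutputStream.
      ByteArrayOutputStream encrypted = new ByteArrayOutputStream();
      OutputStream out = new CryptoOutputStream(encrypted, codec, 4096, key, iv);
      out.write(plain);
      out.close();

      // Decrypt: wrap any InputStream in a CryptoInputStream with the same key/IV.
      InputStream in = new CryptoInputStream(
          new ByteArrayInputStream(encrypted.toByteArray()), codec, 4096, key, iv);
      byte[] decrypted = new byte[plain.length];
      int off = 0;
      while (off < decrypted.length) {
        int n = in.read(decrypted, off, decrypted.length - off);
        if (n < 0) {
          break;
        }
        off += n;
      }
      in.close();
      // "decrypted" now holds the same bytes as "plain".
    }
  }
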
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
new file mode 100644
index 0000000..ebe025b
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreams.java
@@ -0,0 +1,376 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.EOFException;
+import java.io.FileDescriptor;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.EnumSet;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ByteBufferReadable;
+import org.apache.hadoop.fs.CanSetDropBehind;
+import org.apache.hadoop.fs.CanSetReadahead;
+import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
+import org.apache.hadoop.fs.HasFileDescriptor;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.ReadOption;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.Syncable;
+import org.apache.hadoop.io.ByteBufferPool;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+public class TestCryptoStreams extends CryptoStreamsTestBase {
+  /**
+   * Data storage.
+   * {@link #getOutputStream(int)} will write to this buf.
+   * {@link #getInputStream(int)} will read from this buf.
+   */
+  private byte[] buf;
+  private int bufLen;
+  
+  @BeforeClass
+  public static void init() throws Exception {
+    Configuration conf = new Configuration();
+    codec = CryptoCodec.getInstance(conf);
+  }
+  
+  @AfterClass
+  public static void shutdown() throws Exception {
+  }
+  
+  @Override
+  protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv) 
+      throws IOException {
+    DataOutputBuffer out = new DataOutputBuffer() {
+      @Override
+      public void flush() throws IOException {
+        buf = getData();
+        bufLen = getLength();
+      }
+      @Override
+      public void close() throws IOException {
+        buf = getData();
+        bufLen = getLength();
+      }
+    };
+    return new CryptoOutputStream(new FakeOutputStream(out),
+        codec, bufferSize, key, iv);
+  }
+  
+  @Override
+  protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv) 
+      throws IOException {
+    DataInputBuffer in = new DataInputBuffer();
+    in.reset(buf, 0, bufLen);
+    return new CryptoInputStream(new FakeInputStream(in), codec, bufferSize, 
+        key, iv);
+  }
+  
+  private class FakeOutputStream extends OutputStream
+      implements Syncable, CanSetDropBehind {
+    private final byte[] oneByteBuf = new byte[1];
+    private final DataOutputBuffer out;
+    private boolean closed;
+    
+    public FakeOutputStream(DataOutputBuffer out) {
+      this.out = out;
+    }
+    
+    @Override
+    public void write(byte b[], int off, int len) throws IOException {
+      if (b == null) {
+        throw new NullPointerException();
+      } else if (off < 0 || len < 0 || len > b.length - off) {
+        throw new IndexOutOfBoundsException();
+      } else if (len == 0) {
+        return;
+      }
+      
+      checkStream();
+      
+      out.write(b, off, len);
+    }
+    
+    @Override
+    public void flush() throws IOException {
+      checkStream();
+      out.flush();
+    }
+    
+    @Override
+    public void close() throws IOException {
+      if (closed) {
+        return;
+      }
+      
+      out.close();
+      closed = true;
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+      oneByteBuf[0] = (byte)(b & 0xff);
+      write(oneByteBuf, 0, oneByteBuf.length);
+    }
+
+    @Override
+    public void setDropBehind(Boolean dropCache) throws IOException,
+        UnsupportedOperationException {
+    }
+
+    @Override
+    public void hflush() throws IOException {
+      checkStream();
+      flush();
+    }
+
+    @Override
+    public void hsync() throws IOException {
+      checkStream();
+      flush();
+    }
+    
+    private void checkStream() throws IOException {
+      if (closed) {
+        throw new IOException("Stream is closed!");
+      }
+    }
+  }
+  
+  private class FakeInputStream extends InputStream implements 
+      Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, 
+      CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess {
+    private final byte[] oneByteBuf = new byte[1];
+    private int pos = 0;
+    private final byte[] data;
+    private final int length;
+    private boolean closed = false;
+
+    public FakeInputStream(DataInputBuffer in) {
+      data = in.getData();
+      length = in.getLength();
+    }
+    
+    @Override
+    public void seek(long pos) throws IOException {
+      if (pos > length) {
+        throw new IOException("Cannot seek after EOF.");
+      }
+      if (pos < 0) {
+        throw new IOException("Cannot seek to negative offset.");
+      }
+      checkStream();
+      this.pos = (int)pos;
+    }
+    
+    @Override
+    public long getPos() throws IOException {
+      return pos;
+    }
+    
+    @Override
+    public int available() throws IOException {
+      return length - pos;
+    }
+    
+    @Override
+    public int read(byte b[], int off, int len) throws IOException {
+      if (b == null) {
+        throw new NullPointerException();
+      } else if (off < 0 || len < 0 || len > b.length - off) {
+        throw new IndexOutOfBoundsException();
+      } else if (len == 0) {
+        return 0;
+      }
+      
+      checkStream();
+      
+      if (pos < length) {
+        int n = (int) Math.min(len, length - pos);
+        System.arraycopy(data, pos, b, off, n);
+        pos += n;
+        return n;
+      }
+      
+      return -1;
+    }
+    
+    private void checkStream() throws IOException {
+      if (closed) {
+        throw new IOException("Stream is closed!");
+      }
+    }
+    
+    @Override
+    public int read(ByteBuffer buf) throws IOException {
+      checkStream();
+      if (pos < length) {
+        int n = (int) Math.min(buf.remaining(), length - pos);
+        if (n > 0) {
+          buf.put(data, pos, n);
+        }
+        pos += n;
+        return n;
+      }
+      return -1;
+    }
+    
+    @Override
+    public long skip(long n) throws IOException {
+      checkStream();
+      if (n > 0) {
+        if (n + pos > length) {
+          n = length - pos;
+        }
+        pos += n;
+        return n;
+      }
+      return n < 0 ? -1 : 0;
+    }
+    
+    @Override
+    public void close() throws IOException {
+      closed = true;
+    }
+
+    @Override
+    public int read(long position, byte[] b, int off, int len)
+        throws IOException {
+      if (b == null) {
+        throw new NullPointerException();
+      } else if (off < 0 || len < 0 || len > b.length - off) {
+        throw new IndexOutOfBoundsException();
+      } else if (len == 0) {
+        return 0;
+      }
+      
+      if (position > length) {
+        throw new IOException("Cannot read after EOF.");
+      }
+      if (position < 0) {
+        throw new IOException("Cannot read to negative offset.");
+      }
+      
+      checkStream();
+      
+      if (position < length) {
+        int n = (int) Math.min(len, length - position);
+        System.arraycopy(data, (int)position, b, off, n);
+        return n;
+      }
+      
+      return -1;
+    }
+
+    @Override
+    public void readFully(long position, byte[] b, int off, int len)
+        throws IOException {
+      if (b == null) {
+        throw new NullPointerException();
+      } else if (off < 0 || len < 0 || len > b.length - off) {
+        throw new IndexOutOfBoundsException();
+      } else if (len == 0) {
+        return;
+      }
+      
+      if (position > length) {
+        throw new IOException("Cannot read after EOF.");
+      }
+      if (position < 0) {
+        throw new IOException("Cannot read to negative offset.");
+      }
+      
+      checkStream();
+      
+      if (position + len > length) {
+        throw new EOFException("Reach the end of stream.");
+      }
+      
+      System.arraycopy(data, (int)position, b, off, len);
+    }
+
+    @Override
+    public void readFully(long position, byte[] buffer) throws IOException {
+      readFully(position, buffer, 0, buffer.length);
+    }
+
+    @Override
+    public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
+        EnumSet<ReadOption> opts) throws IOException,
+        UnsupportedOperationException {
+      if (bufferPool == null) {
+        throw new IOException("Please specify buffer pool.");
+      }
+      ByteBuffer buffer = bufferPool.getBuffer(true, maxLength);
+      int pos = buffer.position();
+      int n = read(buffer);
+      if (n >= 0) {
+        buffer.position(pos);
+        return buffer;
+      }
+      
+      return null;
+    }
+
+    @Override
+    public void releaseBuffer(ByteBuffer buffer) {
+      
+    }
+
+    @Override
+    public void setReadahead(Long readahead) throws IOException,
+        UnsupportedOperationException {
+    }
+
+    @Override
+    public void setDropBehind(Boolean dropCache) throws IOException,
+        UnsupportedOperationException {
+    }
+
+    @Override
+    public FileDescriptor getFileDescriptor() throws IOException {
+      return null;
+    }
+    
+    @Override
+    public boolean seekToNewSource(long targetPos) throws IOException {
+      if (targetPos > length) {
+        throw new IOException("Attempted to read past end of file.");
+      }
+      if (targetPos < 0) {
+        throw new IOException("Cannot seek after EOF.");
+      }
+      checkStream();
+      this.pos = (int)targetPos;
+      return false;
+    }
+
+    @Override
+    public int read() throws IOException {
+      int ret = read( oneByteBuf, 0, 1 );
+      return ( ret <= 0 ) ? -1 : (oneByteBuf[0] & 0xff);
+    }
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
new file mode 100644
index 0000000..765a364
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsForLocalFS.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+public class TestCryptoStreamsForLocalFS extends CryptoStreamsTestBase {
+  private static final String TEST_ROOT_DIR
+    = System.getProperty("test.build.data","build/test/data") + "/work-dir/localfs";
+
+  private final File base = new File(TEST_ROOT_DIR);
+  private final Path file = new Path(TEST_ROOT_DIR, "test-file");
+  private static LocalFileSystem fileSys;
+  
+  @BeforeClass
+  public static void init() throws Exception {
+    // Use an empty configuration (no default resources loaded).
+    Configuration conf = new Configuration(false);
+    conf.set("fs.file.impl", LocalFileSystem.class.getName());
+    fileSys = FileSystem.getLocal(conf);
+    conf.set(
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
+            + CipherSuite.AES_CTR_NOPADDING.getConfigSuffix(),
+        OpensslAesCtrCryptoCodec.class.getName() + ","
+            + JceAesCtrCryptoCodec.class.getName());
+    codec = CryptoCodec.getInstance(conf);
+  }
+  
+  @AfterClass
+  public static void shutdown() throws Exception {
+  }
+  
+  @Before
+  @Override
+  public void setUp() throws IOException {
+    fileSys.delete(new Path(TEST_ROOT_DIR), true);
+    super.setUp();
+  }
+  
+  @After
+  public void cleanUp() throws IOException {
+    FileUtil.setWritable(base, true);
+    FileUtil.fullyDelete(base);
+    assertTrue(!base.exists());
+  }
+  
+  @Override
+  protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv) 
+      throws IOException {
+    return new CryptoOutputStream(fileSys.create(file), codec, bufferSize, 
+        key, iv);
+  }
+  
+  @Override
+  protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv) 
+      throws IOException {
+    return new CryptoInputStream(fileSys.open(file), codec, bufferSize, 
+        key, iv);
+  }
+  
+  @Ignore("ChecksumFSInputChecker doesn't support ByteBuffer read")
+  @Override
+  @Test(timeout=1000)
+  public void testByteBufferRead() throws Exception {}
+  
+  @Ignore("ChecksumFSOutputSummer doesn't support Syncable")
+  @Override
+  @Test(timeout=1000)
+  public void testSyncable() throws IOException {}
+  
+  @Ignore("ChecksumFSInputChecker doesn't support ByteBuffer read")
+  @Override
+  @Test(timeout=1000)
+  public void testCombinedOp() throws Exception {}
+  
+  @Ignore("ChecksumFSInputChecker doesn't support enhanced ByteBuffer access")
+  @Override
+  @Test(timeout=1000)
+  public void testHasEnhancedByteBufferAccess() throws Exception {
+  }
+  
+  @Ignore("ChecksumFSInputChecker doesn't support seekToNewSource")
+  @Override
+  @Test(timeout=1000)
+  public void testSeekToNewSource() throws Exception {
+  }
+}
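
The init() above also shows how a concrete codec implementation is selected: the
per-cipher-suite configuration key takes an ordered, comma-separated list of codec
classes, here preferring the native OpenSSL implementation with the pure-Java JCE
implementation as a fallback. A minimal sketch of that configuration on its own,
using only the keys and classes referenced in this patch (the printed class name
will depend on whether native OpenSSL support is available):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.crypto.CipherSuite;
  import org.apache.hadoop.crypto.CryptoCodec;
  import org.apache.hadoop.crypto.JceAesCtrCryptoCodec;
  import org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec;
  import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

  public class CodecSelectionSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Ordered preference for AES/CTR/NoPadding: OpenSSL first, JCE as fallback.
      conf.set(
          CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
              + CipherSuite.AES_CTR_NOPADDING.getConfigSuffix(),
          OpensslAesCtrCryptoCodec.class.getName() + ","
              + JceAesCtrCryptoCodec.class.getName());
      CryptoCodec codec = CryptoCodec.getInstance(conf);
      System.out.println("Selected codec: " + codec.getClass().getName());
    }
  }
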
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
new file mode 100644
index 0000000..e9c313f
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsNormal.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+/**
+ * Test crypto streams using a normal stream which does not support the
+ * additional interfaces that the Hadoop FileSystem streams implement
+ * (Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor,
+ * CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess,
+ * Syncable).
+ */
+public class TestCryptoStreamsNormal extends CryptoStreamsTestBase {
+  /**
+   * Data storage.
+   * {@link #getOutputStream(int, byte[], byte[])} will write to this buffer.
+   * {@link #getInputStream(int, byte[], byte[])} will read from this buffer.
+   */
+  private byte[] buffer;
+  private int bufferLen;
+  
+  @BeforeClass
+  public static void init() throws Exception {
+    Configuration conf = new Configuration();
+    codec = CryptoCodec.getInstance(conf);
+  }
+  
+  @AfterClass
+  public static void shutdown() throws Exception {
+  }
+
+  @Override
+  protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv)
+      throws IOException {
+    OutputStream out = new ByteArrayOutputStream() {
+      @Override
+      public void flush() throws IOException {
+        buffer = buf;
+        bufferLen = count;
+      }
+      @Override
+      public void close() throws IOException {
+        buffer = buf;
+        bufferLen = count;
+      }
+    };
+    return new CryptoOutputStream(out, codec, bufferSize, key, iv);
+  }
+
+  @Override
+  protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv)
+      throws IOException {
+    ByteArrayInputStream in = new ByteArrayInputStream(buffer, 0, bufferLen);
+    return new CryptoInputStream(in, codec, bufferSize, 
+        key, iv);
+  }
+  
+  @Ignore("Wrapped stream doesn't support Syncable")
+  @Override
+  @Test(timeout=1000)
+  public void testSyncable() throws IOException {}
+  
+  @Ignore("Wrapped stream doesn't support PositionedRead")
+  @Override
+  @Test(timeout=1000)
+  public void testPositionedRead() throws IOException {}
+
+  @Ignore("Wrapped stream doesn't support ReadFully")
+  @Override
+  @Test(timeout=1000)
+  public void testReadFully() throws IOException {}
+  
+  @Ignore("Wrapped stream doesn't support Seek")
+  @Override
+  @Test(timeout=1000)
+  public void testSeek() throws IOException {}
+  
+  @Ignore("Wrapped stream doesn't support ByteBufferRead")
+  @Override
+  @Test(timeout=1000)
+  public void testByteBufferRead() throws IOException {}
+  
+  @Ignore("Wrapped stream doesn't support ByteBufferRead, Seek")
+  @Override
+  @Test(timeout=1000)
+  public void testCombinedOp() throws IOException {}
+  
+  @Ignore("Wrapped stream doesn't support SeekToNewSource")
+  @Override
+  @Test(timeout=1000)
+  public void testSeekToNewSource() throws IOException {}
+  
+  @Ignore("Wrapped stream doesn't support HasEnhancedByteBufferAccess")
+  @Override
+  @Test(timeout=1000)
+  public void testHasEnhancedByteBufferAccess() throws IOException {}
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
new file mode 100644
index 0000000..f64e8dc
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoStreamsWithOpensslAesCtrCryptoCodec.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.BeforeClass;
+
+public class TestCryptoStreamsWithOpensslAesCtrCryptoCodec 
+    extends TestCryptoStreams {
+  
+  @BeforeClass
+  public static void init() throws Exception {
+    Configuration conf = new Configuration();
+    codec = CryptoCodec.getInstance(conf);
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java
new file mode 100644
index 0000000..966a887
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestOpensslCipher.java
@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.nio.ByteBuffer;
+import java.security.NoSuchAlgorithmException;
+
+import javax.crypto.NoSuchPaddingException;
+import javax.crypto.ShortBufferException;
+
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assume;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestOpensslCipher {
+  private static final byte[] key = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+    0x07, 0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16};
+  private static final byte[] iv = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 
+    0x07, 0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};
+  
+  @Test(timeout=120000)
+  public void testGetInstance() throws Exception {
+    Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
+    OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
+    Assert.assertTrue(cipher != null);
+    
+    try {
+      cipher = OpensslCipher.getInstance("AES2/CTR/NoPadding");
+      Assert.fail("Should specify correct algorithm.");
+    } catch (NoSuchAlgorithmException e) {
+      // Expect NoSuchAlgorithmException
+    }
+    
+    try {
+      cipher = OpensslCipher.getInstance("AES/CTR/NoPadding2");
+      Assert.fail("Should specify correct padding.");
+    } catch (NoSuchPaddingException e) {
+      // Expect NoSuchPaddingException
+    }
+  }
+  
+  @Test(timeout=120000)
+  public void testUpdateArguments() throws Exception {
+    Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
+    OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
+    Assert.assertTrue(cipher != null);
+    
+    cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);
+    
+    // Require direct buffers
+    ByteBuffer input = ByteBuffer.allocate(1024);
+    ByteBuffer output = ByteBuffer.allocate(1024);
+    
+    try {
+      cipher.update(input, output);
+      Assert.fail("Input and output buffer should be direct buffer.");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Direct buffers are required", e);
+    }
+    
+    // Output buffer length should be sufficient to store output data 
+    input = ByteBuffer.allocateDirect(1024);
+    output = ByteBuffer.allocateDirect(1000);
+    try {
+      cipher.update(input, output);
+      Assert.fail("Output buffer length should be sufficient " +
+          "to store output data");
+    } catch (ShortBufferException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Output buffer is not sufficient", e);
+    }
+  }
+  
+  @Test(timeout=120000)
+  public void testDoFinalArguments() throws Exception {
+    Assume.assumeTrue(OpensslCipher.getLoadingFailureReason() == null);
+    OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
+    Assert.assertTrue(cipher != null);
+    
+    cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);
+    
+    // Require direct buffer
+    ByteBuffer output = ByteBuffer.allocate(1024);
+    
+    try {
+      cipher.doFinal(output);
+      Assert.fail("Output buffer should be direct buffer.");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Direct buffer is required", e);
+    }
+  }
+}
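
Taken together, the tests above pin down the calling convention for OpensslCipher:
get an instance for a transformation string, init it with a mode, key, and IV, and
pass direct ByteBuffers to update and doFinal. A rough sketch of that sequence
follows; it assumes native OpenSSL support is loaded, assumes update consumes
input.remaining() bytes and advances the output buffer's position, and uses
arbitrary key/IV values.

  import java.nio.ByteBuffer;

  import org.apache.hadoop.crypto.OpensslCipher;

  public class OpensslCipherSketch {
    public static void main(String[] args) throws Exception {
      if (OpensslCipher.getLoadingFailureReason() != null) {
        System.err.println("Native OpenSSL support is not available; skipping.");
        return;
      }
      byte[] key = new byte[16];  // illustrative AES-128 key
      byte[] iv = new byte[16];   // illustrative CTR IV

      OpensslCipher cipher = OpensslCipher.getInstance("AES/CTR/NoPadding");
      cipher.init(OpensslCipher.ENCRYPT_MODE, key, iv);

      // Both buffers must be direct, and the output buffer must be large
      // enough for the produced data (see testUpdateArguments above).
      ByteBuffer input = ByteBuffer.allocateDirect(1024);
      ByteBuffer output = ByteBuffer.allocateDirect(1024);
      input.put("hello".getBytes("UTF-8"));
      input.flip();

      cipher.update(input, output);  // encrypt the remaining input bytes
      cipher.doFinal(output);        // finalize; CTR mode adds no padding
      output.flip();                 // output now holds the ciphertext
    }
  }
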
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOpensslSecureRandom.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOpensslSecureRandom.java
new file mode 100644
index 0000000..f40c6ac
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOpensslSecureRandom.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.random;
+
+import java.util.Arrays;
+
+import org.junit.Test;
+
+public class TestOpensslSecureRandom {
+  
+  @Test(timeout=120000)
+  public void testRandomBytes() throws Exception {
+    OpensslSecureRandom random = new OpensslSecureRandom();
+    
+    // len = 16
+    checkRandomBytes(random, 16);
+    // len = 32
+    checkRandomBytes(random, 32);
+    // len = 128
+    checkRandomBytes(random, 128);
+    // len = 256
+    checkRandomBytes(random, 256);
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  private void checkRandomBytes(OpensslSecureRandom random, int len) {
+    byte[] bytes = new byte[len];
+    byte[] bytes1 = new byte[len];
+    random.nextBytes(bytes);
+    random.nextBytes(bytes1);
+    
+    while (Arrays.equals(bytes, bytes1)) {
+      random.nextBytes(bytes1);
+    }
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomInt() throws Exception {
+    OpensslSecureRandom random = new OpensslSecureRandom();
+    
+    int rand1 = random.nextInt();
+    int rand2 = random.nextInt();
+    while (rand1 == rand2) {
+      rand2 = random.nextInt();
+    }
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomLong() throws Exception {
+    OpensslSecureRandom random = new OpensslSecureRandom();
+    
+    long rand1 = random.nextLong();
+    long rand2 = random.nextLong();
+    while (rand1 == rand2) {
+      rand2 = random.nextLong();
+    }
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomFloat() throws Exception {
+    OpensslSecureRandom random = new OpensslSecureRandom();
+    
+    float rand1 = random.nextFloat();
+    float rand2 = random.nextFloat();
+    while (rand1 == rand2) {
+      rand2 = random.nextFloat();
+    }
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomDouble() throws Exception {
+    OpensslSecureRandom random = new OpensslSecureRandom();
+    
+    double rand1 = random.nextDouble();
+    double rand2 = random.nextDouble();
+    while (rand1 == rand2) {
+      rand2 = random.nextDouble();
+    }
+  }
+}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
new file mode 100644
index 0000000..8fc5c70
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/random/TestOsSecureRandom.java
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.random;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.commons.lang.SystemUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assume;
+import org.junit.Test;
+
+public class TestOsSecureRandom {
+
+  private static OsSecureRandom getOsSecureRandom() throws IOException {
+    Assume.assumeTrue(SystemUtils.IS_OS_LINUX);
+    OsSecureRandom random = new OsSecureRandom();
+    random.setConf(new Configuration());
+    return random;
+  }
+
+  @Test(timeout=120000)
+  public void testRandomBytes() throws Exception {
+    OsSecureRandom random = getOsSecureRandom();
+    // len = 16
+    checkRandomBytes(random, 16);
+    // len = 32
+    checkRandomBytes(random, 32);
+    // len = 128
+    checkRandomBytes(random, 128);
+    // len = 256
+    checkRandomBytes(random, 256);
+    random.close();
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  private void checkRandomBytes(OsSecureRandom random, int len) {
+    byte[] bytes = new byte[len];
+    byte[] bytes1 = new byte[len];
+    random.nextBytes(bytes);
+    random.nextBytes(bytes1);
+    
+    while (Arrays.equals(bytes, bytes1)) {
+      random.nextBytes(bytes1);
+    }
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomInt() throws Exception {
+    OsSecureRandom random = getOsSecureRandom();
+    
+    int rand1 = random.nextInt();
+    int rand2 = random.nextInt();
+    while (rand1 == rand2) {
+      rand2 = random.nextInt();
+    }
+    random.close();
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomLong() throws Exception {
+    OsSecureRandom random = getOsSecureRandom();
+    
+    long rand1 = random.nextLong();
+    long rand2 = random.nextLong();
+    while (rand1 == rand2) {
+      rand2 = random.nextLong();
+    }
+    random.close();
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomFloat() throws Exception {
+    OsSecureRandom random = getOsSecureRandom();
+    
+    float rand1 = random.nextFloat();
+    float rand2 = random.nextFloat();
+    while (rand1 == rand2) {
+      rand2 = random.nextFloat();
+    }
+    random.close();
+  }
+  
+  /**
+   * Test will timeout if secure random implementation always returns a 
+   * constant value.
+   */
+  @Test(timeout=120000)
+  public void testRandomDouble() throws Exception {
+    OsSecureRandom random = getOsSecureRandom();
+    
+    double rand1 = random.nextDouble();
+    double rand2 = random.nextDouble();
+    while (rand1 == rand2) {
+      rand2 = random.nextDouble();
+    }
+    random.close();
+  }
+
+  @Test(timeout=120000)
+  public void testRefillReservoir() throws Exception {
+    OsSecureRandom random = getOsSecureRandom();
+
+    for (int i = 0; i < 8196; i++) {
+      random.nextLong();
+    }
+    random.close();
+  }
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
index 9efaca9..473c177 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestNativeCodeLoader.java
@@ -22,6 +22,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.crypto.OpensslCipher;
 import org.apache.hadoop.io.compress.Lz4Codec;
 import org.apache.hadoop.io.compress.SnappyCodec;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
@@ -54,6 +55,9 @@
     if (NativeCodeLoader.buildSupportsSnappy()) {
       assertFalse(SnappyCodec.getLibraryName().isEmpty());
     }
+    if (NativeCodeLoader.buildSupportsOpenssl()) {
+      assertFalse(OpensslCipher.getLibraryName().isEmpty());
+    }
     assertFalse(Lz4Codec.getLibraryName().isEmpty());
     LOG.info("TestNativeCodeLoader: libhadoop.so is loaded.");
   }
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
index c44eddaf..804b504 100644
--- a/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
+++ b/hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
@@ -324,7 +324,23 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*permission. Passing -f overwrites the destination if it already exists.( )*</expected-output>
+          <expected-output>^( |\t)*permission. Passing -f overwrites the destination if it already exists. raw( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*namespace extended attributes are preserved if \(1\) they are supported \(HDFS( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*only\) and, \(2\) all of the source and target pathnames are in the \/\.reserved\/raw( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*hierarchy. raw namespace xattr preservation is determined solely by the presence( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*\(or absence\) of the \/\.reserved\/raw prefix and not by the -p option.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt
new file mode 100644
index 0000000..0171b82
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-fs-encryption.txt
@@ -0,0 +1,102 @@
+Hadoop HDFS Change Log for HDFS-6134 and HADOOP-10150
+
+fs-encryption (Unreleased)
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HDFS-6387. HDFS CLI admin tool for creating & deleting an
+    encryption zone. (clamb)
+
+    HDFS-6386. HDFS Encryption Zones (clamb)
+
+    HDFS-6388. HDFS integration with KeyProvider. (clamb)
+
+    HDFS-6473. Protocol and API for Encryption Zones (clamb)
+
+    HDFS-6392. Wire crypto streams for encrypted files in
+    DFSClient. (clamb and yliu)
+
+    HDFS-6476. Print out the KeyProvider after finding KP successfully on
+    startup. (Juan Yu via wang)
+
+    HDFS-6391. Get the Key/IV from the NameNode for encrypted files in
+    DFSClient. (Charles Lamb and wang)
+
+    HDFS-6389. Rename restrictions for encryption zones. (clamb)
+
+    HDFS-6605. Client server negotiation of cipher suite. (wang)
+
+    HDFS-6625. Remove the Delete Encryption Zone function (clamb)
+
+    HDFS-6516. List of Encryption Zones should be based on inodes (clamb)
+
+    HDFS-6629. Not able to create symlinks after HDFS-6516 (umamaheswararao)
+
+    HDFS-6635. Refactor encryption zone functionality into new
+    EncryptionZoneManager class. (wang)
+
+    HDFS-6474. Namenode needs to get the actual keys and iv from the
+    KeyProvider. (wang)
+
+    HDFS-6619. Clean up encryption-related tests. (wang)
+
+    HDFS-6405. Test Crypto streams in HDFS. (yliu via wang)
+
+    HDFS-6490. Fix the keyid format for generated keys in
+    FSNamesystem.createEncryptionZone (clamb)
+
+    HDFS-6716. Update usage of KeyProviderCryptoExtension APIs on NameNode.
+    (wang)
+
+    HDFS-6718. Remove EncryptionZoneManager lock. (wang)
+
+    HDFS-6720. Remove KeyProvider in EncryptionZoneManager. (wang)
+
+    HDFS-6738. Remove unnecessary getEncryptionZoneForPath call in
+    EZManager#createEncryptionZone. (clamb)
+
+    HDFS-6724. Decrypt EDEK before creating
+    CryptoInputStream/CryptoOutputStream. (wang)
+
+    HDFS-6509. Create a special /.reserved/raw directory for raw access to
+    encrypted data. (clamb via wang)
+
+    HDFS-6771. Require specification of an encryption key when creating
+    an encryption zone. (wang)
+
+    HDFS-6730. Create a .RAW extended attribute namespace. (clamb)
+
+    HDFS-6692. Add more HDFS encryption tests. (wang)
+
+    HDFS-6780. Batch the encryption zones listing API. (wang)
+
+    HDFS-6394. HDFS encryption documentation. (wang)
+
+    HDFS-6834. Improve the configuration guidance in DFSClient when there 
+    are no Codec classes found in configs. (umamahesh)
+
+    HDFS-6546. Add non-superuser capability to get the encryption zone
+    for a specific path. (clamb)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HDFS-6733. Creating encryption zone results in NPE when
+    KeyProvider is null. (clamb)
+
+    HDFS-6785. Should not be able to create encryption zone using path
+    to a non-directory file. (clamb)
+
+    HDFS-6807. Fix TestReservedRawPaths. (clamb)
+
+    HDFS-6814. Mistakenly dfs.namenode.list.encryption.zones.num.responses configured
+    as boolean. (umamahesh)
+
+    HDFS-6817. Fix findbugs and other warnings. (yliu)
+
+    HDFS-6839. Fix TestCLI to expect new output. (clamb)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 5a0d7a8..9b026f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -304,6 +304,7 @@
                   <include>datatransfer.proto</include>
                   <include>fsimage.proto</include>
                   <include>hdfs.proto</include>
+                  <include>encryption.proto</include>
                 </includes>
               </source>
               <output>${project.build.directory}/generated-sources/java</output>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index bb56362..77f1582 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -46,6 +46,7 @@
   echo "  snapshotDiff         diff two snapshots of a directory or diff the"
   echo "                       current directory contents with a snapshot"
   echo "  zkfc                 run the ZK Failover Controller daemon"
+  echo "  crypto               configure HDFS encryption zones"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
 }
@@ -89,6 +90,9 @@
     echo "${CLASSPATH}"
     exit
   ;;
+  crypto)
+    CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
+  ;;
   datanode)
     daemon="true"
     # Determine if we're starting a secure datanode, and
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index a0e75f8..111630c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.fs;
 
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
@@ -31,6 +30,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CryptoCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -38,6 +38,8 @@
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -59,6 +61,7 @@
 public class Hdfs extends AbstractFileSystem {
 
   DFSClient dfs;
+  final CryptoCodec factory;
   private boolean verifyChecksum = true;
 
   static {
@@ -85,6 +88,7 @@
     }
 
     this.dfs = new DFSClient(theUri, conf, getStatistics());
+    this.factory = CryptoCodec.getInstance(conf);
   }
 
   @Override
@@ -97,9 +101,12 @@
       EnumSet<CreateFlag> createFlag, FsPermission absolutePermission,
       int bufferSize, short replication, long blockSize, Progressable progress,
       ChecksumOpt checksumOpt, boolean createParent) throws IOException {
-    return new HdfsDataOutputStream(dfs.primitiveCreate(getUriPath(f),
-        absolutePermission, createFlag, createParent, replication, blockSize,
-        progress, bufferSize, checksumOpt), getStatistics());
+
+    final DFSOutputStream dfsos = dfs.primitiveCreate(getUriPath(f),
+      absolutePermission, createFlag, createParent, replication, blockSize,
+      progress, bufferSize, checksumOpt);
+    return dfs.createWrappedOutputStream(dfsos, statistics,
+        dfsos.getInitialLen());
   }
 
   @Override
@@ -308,8 +315,9 @@
   @Override
   public HdfsDataInputStream open(Path f, int bufferSize) 
       throws IOException, UnresolvedLinkException {
-    return new DFSClient.DFSDataInputStream(dfs.open(getUriPath(f),
-        bufferSize, verifyChecksum));
+    final DFSInputStream dfsis = dfs.open(getUriPath(f),
+      bufferSize, verifyChecksum);
+    return dfs.createWrappedInputStream(dfsis);
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
index 99f629a..968ee00 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/XAttr.java
@@ -26,8 +26,8 @@
 /**
  * XAttr is the POSIX Extended Attribute model similar to that found in
  * traditional Operating Systems.  Extended Attributes consist of one
- * or more name/value pairs associated with a file or directory. Four
- * namespaces are defined: user, trusted, security and system.
+ * or more name/value pairs associated with a file or directory. Five
+ * namespaces are defined: user, trusted, security, system and raw.
  *   1) USER namespace attributes may be used by any user to store
  *   arbitrary information. Access permissions in this namespace are
  *   defined by a file directory's permission bits. For sticky directories,
@@ -43,6 +43,12 @@
  * <br>
  *   4) SECURITY namespace attributes are used by the fs kernel for
  *   security features. It is not visible to users.
+ * <br>
+ *   5) RAW namespace attributes are used for internal system attributes that
+ *   sometimes need to be exposed. Like SYSTEM namespace attributes, they are
+ *   not visible to the user except when getXAttr/getXAttrs is called on a file
+ *   or directory in the /.reserved/raw HDFS directory hierarchy.  These
+ *   attributes can only be accessed by the superuser.
  * <p/>
  * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
  * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
@@ -55,7 +61,8 @@
     USER,
     TRUSTED,
     SECURITY,
-    SYSTEM;
+    SYSTEM,
+    RAW;
   }
   
   private final NameSpace ns;
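
The RAW namespace described above is only reachable through the /.reserved/raw prefix and only by the HDFS superuser. A minimal sketch of reading raw.* attributes that way, assuming a reachable NameNode; the cluster URI and path below are hypothetical:

    import java.net.URI;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RawXAttrExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical cluster URI; adjust to the target NameNode.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);

        // raw.* attributes are only visible through the /.reserved/raw prefix,
        // and only to the HDFS superuser.
        Path rawPath = new Path("/.reserved/raw/zone/file");
        Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
        for (Map.Entry<String, byte[]> e : xattrs.entrySet()) {
          System.out.println(e.getKey() + " -> " + e.getValue().length + " bytes");
        }
      }
    }
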
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index b9af35e..c49d210 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
+import static org.apache.hadoop.crypto.key.KeyProviderCryptoExtension
+    .EncryptedKeyVersion;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
@@ -76,6 +81,7 @@
 import java.net.SocketAddress;
 import java.net.URI;
 import java.net.UnknownHostException;
+import java.security.GeneralSecurityException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.EnumSet;
@@ -95,6 +101,11 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoInputStream;
+import org.apache.hadoop.crypto.CryptoOutputStream;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStorageLocation;
 import org.apache.hadoop.fs.CacheFlag;
@@ -102,6 +113,7 @@
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
@@ -140,6 +152,9 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -249,7 +264,11 @@
   private static final DFSHedgedReadMetrics HEDGED_READ_METRIC =
       new DFSHedgedReadMetrics();
   private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
-  
+  private final CryptoCodec codec;
+  @VisibleForTesting
+  List<CipherSuite> cipherSuites;
+  @VisibleForTesting
+  KeyProviderCryptoExtension provider;
   /**
    * DFSClient configuration 
    */
@@ -581,7 +600,17 @@
     this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
     this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" + 
         DFSUtil.getRandom().nextInt()  + "_" + Thread.currentThread().getId();
-    
+    this.codec = CryptoCodec.getInstance(conf);
+    this.cipherSuites = Lists.newArrayListWithCapacity(1);
+    if (codec != null) {
+      cipherSuites.add(codec.getCipherSuite());
+    }
+    provider = DFSUtil.createKeyProviderCryptoExtension(conf);
+    if (provider == null) {
+      LOG.info("No KeyProvider found.");
+    } else {
+      LOG.info("Found KeyProvider: " + provider.toString());
+    }
     int numResponseToDrop = conf.getInt(
         DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
         DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
@@ -1280,7 +1309,93 @@
 
     return volumeBlockLocations;
   }
-  
+
+  /**
+   * Decrypts an EDEK by consulting the KeyProvider.
+   */
+  private KeyVersion decryptEncryptedDataEncryptionKey(FileEncryptionInfo
+      feInfo) throws IOException {
+    if (provider == null) {
+      throw new IOException("No KeyProvider is configured, cannot access" +
+          " an encrypted file");
+    }
+    EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption(
+        feInfo.getEzKeyVersionName(), feInfo.getIV(),
+        feInfo.getEncryptedDataEncryptionKey());
+    try {
+      return provider.decryptEncryptedKey(ekv);
+    } catch (GeneralSecurityException e) {
+      throw new IOException(e);
+    }
+  }
+
+  /**
+   * Wraps the stream in a CryptoInputStream if the underlying file is
+   * encrypted.
+   */
+  public HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis)
+      throws IOException {
+    final FileEncryptionInfo feInfo = dfsis.getFileEncryptionInfo();
+    if (feInfo != null) {
+      // File is encrypted, wrap the stream in a crypto stream.
+      KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
+      CryptoCodec codec = CryptoCodec
+          .getInstance(conf, feInfo.getCipherSuite());
+      if (codec == null) {
+        throw new IOException("No configuration found for the cipher suite "
+            + feInfo.getCipherSuite().getConfigSuffix() + " prefixed with "
+            + HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
+            + ". Please see the example configuration "
+            + "hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE "
+            + "at core-default.xml for details.");
+      }
+      final CryptoInputStream cryptoIn =
+          new CryptoInputStream(dfsis, codec, decrypted.getMaterial(),
+              feInfo.getIV());
+      return new HdfsDataInputStream(cryptoIn);
+    } else {
+      // No FileEncryptionInfo so no encryption.
+      return new HdfsDataInputStream(dfsis);
+    }
+  }
+
+  /**
+   * Wraps the stream in a CryptoOutputStream if the underlying file is
+   * encrypted.
+   */
+  public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos,
+      FileSystem.Statistics statistics) throws IOException {
+    return createWrappedOutputStream(dfsos, statistics, 0);
+  }
+
+  /**
+   * Wraps the stream in a CryptoOutputStream if the underlying file is
+   * encrypted.
+   */
+  public HdfsDataOutputStream createWrappedOutputStream(DFSOutputStream dfsos,
+      FileSystem.Statistics statistics, long startPos) throws IOException {
+    final FileEncryptionInfo feInfo = dfsos.getFileEncryptionInfo();
+    if (feInfo != null) {
+      if (codec == null) {
+        throw new IOException("No configuration found for the cipher suite "
+            + HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY + " value prefixed with "
+            + HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX
+            + ". Please see the example configuration "
+            + "hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE "
+            + "at core-default.xml for details.");
+      }
+      // File is encrypted, wrap the stream in a crypto stream.
+      KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
+      final CryptoOutputStream cryptoOut =
+          new CryptoOutputStream(dfsos, codec,
+              decrypted.getMaterial(), feInfo.getIV(), startPos);
+      return new HdfsDataOutputStream(cryptoOut, statistics, startPos);
+    } else {
+      // No FileEncryptionInfo present so no encryption.
+      return new HdfsDataOutputStream(dfsos, statistics, startPos);
+    }
+  }
+
   public DFSInputStream open(String src) 
       throws IOException, UnresolvedLinkException {
     return open(src, dfsClientConf.ioBufferSize, true, null);
@@ -1483,7 +1598,8 @@
     }
     final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
         src, masked, flag, createParent, replication, blockSize, progress,
-        buffersize, dfsClientConf.createChecksum(checksumOpt), favoredNodeStrs);
+        buffersize, dfsClientConf.createChecksum(checksumOpt),
+        favoredNodeStrs, cipherSuites);
     beginFileLease(result.getFileId(), result);
     return result;
   }
@@ -1530,7 +1646,7 @@
       DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
       result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
           flag, createParent, replication, blockSize, progress, buffersize,
-          checksum);
+          checksum, null, cipherSuites);
     }
     beginFileLease(result.getFileId(), result);
     return result;
@@ -1608,7 +1724,7 @@
       final Progressable progress, final FileSystem.Statistics statistics
       ) throws IOException {
     final DFSOutputStream out = append(src, buffersize, progress);
-    return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
+    return createWrappedOutputStream(out, statistics, out.getInitialLen());
   }
 
   private DFSOutputStream append(String src, int buffersize, Progressable progress) 
@@ -2753,6 +2869,36 @@
     }
   }
   
+  public void createEncryptionZone(String src, String keyName)
+    throws IOException {
+    checkOpen();
+    try {
+      namenode.createEncryptionZone(src, keyName);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+                                     SafeModeException.class,
+                                     UnresolvedPathException.class);
+    }
+  }
+
+  public EncryptionZone getEZForPath(String src)
+          throws IOException {
+    checkOpen();
+    try {
+      final EncryptionZoneWithId ezi = namenode.getEZForPath(src);
+      return (ezi.getId() < 0) ? null : ezi;
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+                                     UnresolvedPathException.class);
+    }
+  }
+
+  public RemoteIterator<EncryptionZone> listEncryptionZones()
+      throws IOException {
+    checkOpen();
+    return new EncryptionZoneIterator(namenode);
+  }
+
   public void setXAttr(String src, String name, byte[] value, 
       EnumSet<XAttrSetFlag> flag) throws IOException {
     checkOpen();
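
With createWrappedInputStream and createWrappedOutputStream in place, encryption stays transparent to FileSystem callers: when a file carries FileEncryptionInfo, DFSClient decrypts the EDEK and wraps the DFS stream in a crypto stream. A minimal sketch of what that looks like from the application side, assuming a path that already lies inside an encryption zone (the URI and path are illustrative):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TransparentEncryptionExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);

        // Hypothetical file inside an existing encryption zone rooted at /zone.
        Path file = new Path("/zone/hello.txt");

        // Writes are encrypted by the wrapped CryptoOutputStream before they
        // leave the client; no application-level changes are needed.
        try (FSDataOutputStream out = fs.create(file)) {
          out.writeUTF("hello, encrypted world");
        }

        // Reads are decrypted by the wrapped CryptoInputStream on the way back.
        try (FSDataInputStream in = fs.open(file)) {
          System.out.println(in.readUTF());
        }
      }
    }
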
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index ac46cb6..71a530b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -567,7 +567,9 @@
   public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
   public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
   public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class";
-  
+  public static final int    DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT = 100;
+  public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses";
+
   // Journal-node related configs. These are read on the JN side.
   public static final String  DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir";
   public static final String  DFS_JOURNALNODE_EDITS_DIR_DEFAULT = "/tmp/hadoop/dfs/journalnode/";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 53ee4b2..af1ba14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -56,6 +56,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
@@ -92,6 +93,7 @@
   private final boolean verifyChecksum;
   private LocatedBlocks locatedBlocks = null;
   private long lastBlockBeingWrittenLength = 0;
+  private FileEncryptionInfo fileEncryptionInfo = null;
   private DatanodeInfo currentNode = null;
   private LocatedBlock currentLocatedBlock = null;
   private long pos = 0;
@@ -301,6 +303,8 @@
       }
     }
 
+    fileEncryptionInfo = locatedBlocks.getFileEncryptionInfo();
+
     currentNode = null;
     return lastBlockBeingWrittenLength;
   }
@@ -1525,6 +1529,10 @@
     return new ReadStatistics(readStatistics);
   }
 
+  public synchronized FileEncryptionInfo getFileEncryptionInfo() {
+    return fileEncryptionInfo;
+  }
+
   private synchronized void closeCurrentBlockReader() {
     if (blockReader == null) return;
     // Close the current block reader so that the new caching settings can 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index debf83c..4ba2a3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -42,10 +42,12 @@
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.fs.CanSetDropBehind;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSOutputSummer;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -153,7 +155,8 @@
   private boolean shouldSyncBlock = false; // force blocks to disk upon close
   private final AtomicReference<CachingStrategy> cachingStrategy;
   private boolean failPacket = false;
-  
+  private FileEncryptionInfo fileEncryptionInfo;
+
   private static class Packet {
     private static final long HEART_BEAT_SEQNO = -1L;
     final long seqno; // sequencenumber of buffer in block
@@ -1560,6 +1563,7 @@
     this.fileId = stat.getFileId();
     this.blockSize = stat.getBlockSize();
     this.blockReplication = stat.getReplication();
+    this.fileEncryptionInfo = stat.getFileEncryptionInfo();
     this.progress = progress;
     this.cachingStrategy = new AtomicReference<CachingStrategy>(
         dfsClient.getDefaultWriteCachingStrategy());
@@ -1600,12 +1604,13 @@
   static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
       FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
       short replication, long blockSize, Progressable progress, int buffersize,
-      DataChecksum checksum, String[] favoredNodes) throws IOException {
+      DataChecksum checksum, String[] favoredNodes,
+      List<CipherSuite> cipherSuites) throws IOException {
     final HdfsFileStatus stat;
     try {
       stat = dfsClient.namenode.create(src, masked, dfsClient.clientName,
           new EnumSetWritable<CreateFlag>(flag), createParent, replication,
-          blockSize);
+          blockSize, cipherSuites);
     } catch(RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
                                      DSQuotaExceededException.class,
@@ -1615,7 +1620,8 @@
                                      NSQuotaExceededException.class,
                                      SafeModeException.class,
                                      UnresolvedPathException.class,
-                                     SnapshotAccessControlException.class);
+                                     SnapshotAccessControlException.class,
+                                     UnknownCipherSuiteException.class);
     }
     final DFSOutputStream out = new DFSOutputStream(dfsClient, src, stat,
         flag, progress, checksum, favoredNodes);
@@ -1623,14 +1629,6 @@
     return out;
   }
 
-  static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
-      FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
-      short replication, long blockSize, Progressable progress, int buffersize,
-      DataChecksum checksum) throws IOException {
-    return newStreamForCreate(dfsClient, src, masked, flag, createParent, replication,
-        blockSize, progress, buffersize, checksum, null);
-  }
-
   /** Construct a new output stream for append. */
   private DFSOutputStream(DFSClient dfsClient, String src,
       Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat,
@@ -1648,6 +1646,7 @@
           checksum.getBytesPerChecksum());
       streamer = new DataStreamer();
     }
+    this.fileEncryptionInfo = stat.getFileEncryptionInfo();
   }
 
   static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
@@ -2172,11 +2171,18 @@
   /**
    * Returns the size of a file as it was when this stream was opened
    */
-  long getInitialLen() {
+  public long getInitialLen() {
     return initialFileSize;
   }
 
   /**
+   * @return the FileEncryptionInfo for this stream, or null if not encrypted.
+   */
+  public FileEncryptionInfo getFileEncryptionInfo() {
+    return fileEncryptionInfo;
+  }
+
+  /**
    * Returns the access token currently used by streamer, for testing only
    */
   synchronized Token<BlockTokenIdentifier> getBlockToken() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index cfc5ca7..5559e0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -71,6 +71,9 @@
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -1722,4 +1725,39 @@
       }
     }
   }
+
+  /**
+   * Creates a new KeyProviderCryptoExtension by wrapping the
+   * KeyProvider specified in the given Configuration.
+   *
+   * @param conf Configuration specifying a single, non-transient KeyProvider.
+   * @return new KeyProviderCryptoExtension, or null if no provider was found.
+   * @throws IOException if the KeyProvider is improperly specified in
+   *                             the Configuration
+   */
+  public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
+      final Configuration conf) throws IOException {
+    final List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
+    if (providers == null || providers.size() == 0) {
+      return null;
+    }
+    if (providers.size() > 1) {
+      StringBuilder builder = new StringBuilder();
+      builder.append("Found multiple KeyProviders but only one is permitted [");
+      String prefix = " ";
+      for (KeyProvider kp: providers) {
+        builder.append(prefix + kp.toString());
+        prefix = ", ";
+      }
+      builder.append("]");
+      throw new IOException(builder.toString());
+    }
+    KeyProviderCryptoExtension provider = KeyProviderCryptoExtension
+        .createKeyProviderCryptoExtension(providers.get(0));
+    if (provider.isTransient()) {
+      throw new IOException("KeyProvider " + provider.toString()
+          + " was found but it is a transient provider.");
+    }
+    return provider;
+  }
 }
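
A short sketch of how a client could obtain the crypto-capable provider through this new helper, mirroring what DFSClient now does at construction time. The provider URI below is an assumption (a local JCEKS keystore) used purely for illustration; exactly one non-transient provider must be configured or the helper throws IOException.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
    import org.apache.hadoop.hdfs.DFSUtil;

    public class KeyProviderLookupExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed setting: a single JCEKS keystore as the key provider.
        conf.set("hadoop.security.key.provider.path",
            "jceks://file/tmp/test.jceks");

        KeyProviderCryptoExtension provider =
            DFSUtil.createKeyProviderCryptoExtension(conf);
        if (provider == null) {
          System.out.println("No KeyProvider found; encrypted files cannot be read.");
        } else {
          System.out.println("Found KeyProvider: " + provider);
        }
      }
    }
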
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index e20c61f..354640b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -61,7 +61,6 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -69,6 +68,7 @@
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -291,8 +291,9 @@
       @Override
       public FSDataInputStream doCall(final Path p)
           throws IOException, UnresolvedLinkException {
-        return new HdfsDataInputStream(
-            dfs.open(getPathName(p), bufferSize, verifyChecksum));
+        final DFSInputStream dfsis =
+          dfs.open(getPathName(p), bufferSize, verifyChecksum);
+        return dfs.createWrappedInputStream(dfsis);
       }
       @Override
       public FSDataInputStream next(final FileSystem fs, final Path p)
@@ -357,7 +358,7 @@
                 : EnumSet.of(CreateFlag.CREATE),
             true, replication, blockSize, progress, bufferSize, null,
             favoredNodes);
-        return new HdfsDataOutputStream(out, statistics);
+        return dfs.createWrappedOutputStream(out, statistics);
       }
       @Override
       public HdfsDataOutputStream next(final FileSystem fs, final Path p)
@@ -385,9 +386,10 @@
       @Override
       public FSDataOutputStream doCall(final Path p)
           throws IOException, UnresolvedLinkException {
-        return new HdfsDataOutputStream(dfs.create(getPathName(p), permission,
-            cflags, replication, blockSize, progress, bufferSize, checksumOpt),
-            statistics);
+        final DFSOutputStream dfsos = dfs.create(getPathName(p), permission,
+                cflags, replication, blockSize, progress, bufferSize,
+                checksumOpt);
+        return dfs.createWrappedOutputStream(dfsos, statistics);
       }
       @Override
       public FSDataOutputStream next(final FileSystem fs, final Path p)
@@ -404,11 +406,12 @@
     short replication, long blockSize, Progressable progress,
     ChecksumOpt checksumOpt) throws IOException {
     statistics.incrementWriteOps(1);
-    return new HdfsDataOutputStream(dfs.primitiveCreate(
-        getPathName(fixRelativePart(f)),
-        absolutePermission, flag, true, replication, blockSize,
-        progress, bufferSize, checksumOpt),statistics);
-   }
+    final DFSOutputStream dfsos = dfs.primitiveCreate(
+      getPathName(fixRelativePart(f)),
+      absolutePermission, flag, true, replication, blockSize,
+      progress, bufferSize, checksumOpt);
+    return dfs.createWrappedOutputStream(dfsos, statistics);
+  }
 
   /**
    * Same as create(), except fails if parent directory doesn't already exist.
@@ -428,9 +431,9 @@
       @Override
       public FSDataOutputStream doCall(final Path p) throws IOException,
           UnresolvedLinkException {
-        return new HdfsDataOutputStream(dfs.create(getPathName(p), permission,
-            flag, false, replication, blockSize, progress, bufferSize, null),
-            statistics);
+        final DFSOutputStream dfsos = dfs.create(getPathName(p), permission,
+          flag, false, replication, blockSize, progress, bufferSize, null);
+        return dfs.createWrappedOutputStream(dfsos, statistics);
       }
 
       @Override
@@ -1796,6 +1799,25 @@
     }.resolve(this, absF);
   }
   
+  /* HDFS only */
+  public void createEncryptionZone(Path path, String keyName)
+    throws IOException {
+    dfs.createEncryptionZone(getPathName(path), keyName);
+  }
+
+  /* HDFS only */
+  public EncryptionZone getEZForPath(Path path)
+          throws IOException {
+    Preconditions.checkNotNull(path);
+    return dfs.getEZForPath(getPathName(path));
+  }
+
+  /* HDFS only */
+  public RemoteIterator<EncryptionZone> listEncryptionZones()
+      throws IOException {
+    return dfs.listEncryptionZones();
+  }
+
   @Override
   public void setXAttr(Path path, final String name, final byte[] value, 
       final EnumSet<XAttrSetFlag> flag) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/UnknownCipherSuiteException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/UnknownCipherSuiteException.java
new file mode 100644
index 0000000..b85edf6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/UnknownCipherSuiteException.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class UnknownCipherSuiteException extends IOException {
+  private static final long serialVersionUID = 8957192L;
+
+  public UnknownCipherSuiteException() {
+    super();
+  }
+
+  public UnknownCipherSuiteException(String msg) {
+    super(msg);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
index abcd47a..04364ccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
@@ -49,9 +49,9 @@
     Preconditions.checkNotNull(name, "XAttr name cannot be null.");
     
     final int prefixIndex = name.indexOf(".");
-    if (prefixIndex < 4) {// Prefix length is at least 4.
+    if (prefixIndex < 3) {// Prefix length is at least 3.
       throw new HadoopIllegalArgumentException("An XAttr name must be " +
-          "prefixed with user/trusted/security/system, followed by a '.'");
+          "prefixed with user/trusted/security/system/raw, followed by a '.'");
     } else if (prefixIndex == name.length() - 1) {
       throw new HadoopIllegalArgumentException("XAttr name cannot be empty.");
     }
@@ -66,9 +66,11 @@
       ns = NameSpace.SYSTEM;
     } else if (prefix.equals(NameSpace.SECURITY.toString().toLowerCase())) {
       ns = NameSpace.SECURITY;
+    } else if (prefix.equals(NameSpace.RAW.toString().toLowerCase())) {
+      ns = NameSpace.RAW;
     } else {
       throw new HadoopIllegalArgumentException("An XAttr name must be " +
-          "prefixed with user/trusted/security/system, followed by a '.'");
+          "prefixed with user/trusted/security/system/raw, followed by a '.'");
     }
     XAttr xAttr = (new XAttr.Builder()).setNameSpace(ns).setName(name.
         substring(prefixIndex + 1)).setValue(value).build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index 0f0769e..1adfc1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.client;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.util.EnumSet;
@@ -33,7 +34,9 @@
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 
 /**
@@ -225,4 +228,51 @@
   public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
     return dfs.listCachePools();
   }
+
+  /**
+   * Create an encryption zone rooted at an empty existing directory, using the
+   * specified encryption key. An encryption zone has an associated encryption
+   * key used when reading and writing files within the zone.
+   *
+   * @param path    The path of the root of the encryption zone. Must refer to
+   *                an empty, existing directory.
+   * @param keyName Name of key available at the KeyProvider.
+   * @throws IOException            if there was a general IO exception
+   * @throws AccessControlException if the caller does not have access to path
+   * @throws FileNotFoundException  if the path does not exist
+   */
+  public void createEncryptionZone(Path path, String keyName)
+    throws IOException, AccessControlException, FileNotFoundException {
+    dfs.createEncryptionZone(path, keyName);
+  }
+
+  /**
+   * Get the path of the encryption zone for a given file or directory.
+   *
+   * @param path The path to get the ez for.
+   *
+   * @return The EncryptionZone, or null if path is not in an encryption zone.
+   * @throws IOException            if there was a general IO exception
+   * @throws AccessControlException if the caller does not have access to path
+   * @throws FileNotFoundException  if the path does not exist
+   */
+  public EncryptionZone getEncryptionZoneForPath(Path path)
+    throws IOException, AccessControlException, FileNotFoundException {
+    return dfs.getEZForPath(path);
+  }
+
+  /**
+   * Returns a RemoteIterator which can be used to list the encryption zones
+   * in HDFS. For large numbers of encryption zones, the iterator will fetch
+   * the list of zones in a number of small batches.
+   * <p/>
+   * Since the list is fetched in batches, it does not represent a
+   * consistent snapshot of the entire list of encryption zones.
+   * <p/>
+   * This method can only be called by HDFS superusers.
+   */
+  public RemoteIterator<EncryptionZone> listEncryptionZones()
+      throws IOException {
+    return dfs.listEncryptionZones();
+  }
 }
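
The three admin operations added above compose into a simple management workflow. A hedged sketch, assuming the key "mykey" already exists in the configured KeyProvider and that /zone is an empty, existing directory:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.EncryptionZone;

    public class EncryptionZoneAdminExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://localhost:8020"), conf);

        // Turn an empty, existing directory into an encryption zone.
        Path zoneRoot = new Path("/zone");
        admin.createEncryptionZone(zoneRoot, "mykey");

        // Resolve the zone that contains a given path.
        EncryptionZone ez = admin.getEncryptionZoneForPath(zoneRoot);
        if (ez != null) {
          System.out.println("In zone " + ez.getPath() + " (key " + ez.getKeyName() + ")");
        }

        // Superuser-only: list all zones, fetched from the NameNode in batches.
        RemoteIterator<EncryptionZone> it = admin.listEncryptionZones();
        while (it.hasNext()) {
          System.out.println(it.next());
        }
      }
    }
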
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
index 9ed895e..e1269c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
@@ -17,17 +17,21 @@
  */
 package org.apache.hadoop.hdfs.client;
 
+import java.io.InputStream;
 import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.crypto.CryptoInputStream;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 
+import com.google.common.base.Preconditions;
+
 /**
  * The Hdfs implementation of {@link FSDataInputStream}.
  */
@@ -38,25 +42,49 @@
     super(in);
   }
 
+  public HdfsDataInputStream(CryptoInputStream in) throws IOException {
+    super(in);
+    Preconditions.checkArgument(in.getWrappedStream() instanceof DFSInputStream,
+        "CryptoInputStream should wrap a DFSInputStream");
+  }
+
+  private DFSInputStream getDFSInputStream() {
+    if (in instanceof CryptoInputStream) {
+      return (DFSInputStream) ((CryptoInputStream) in).getWrappedStream();
+    }
+    return (DFSInputStream) in;
+  }
+
+  /**
+   * Get a reference to the wrapped input stream. This returns the stream
+   * passed to the constructor, which may be a CryptoInputStream; the
+   * delegated methods below instead unwrap it down to the DFSInputStream.
+   *
+   * @return the wrapped input stream
+   */
+  public InputStream getWrappedStream() {
+    return in;
+  }
+
   /**
    * Get the datanode from which the stream is currently reading.
    */
   public DatanodeInfo getCurrentDatanode() {
-    return ((DFSInputStream) in).getCurrentDatanode();
+    return getDFSInputStream().getCurrentDatanode();
   }
 
   /**
    * Get the block containing the target position.
    */
   public ExtendedBlock getCurrentBlock() {
-    return ((DFSInputStream) in).getCurrentBlock();
+    return getDFSInputStream().getCurrentBlock();
   }
 
   /**
    * Get the collection of blocks that has already been located.
    */
   public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
-    return ((DFSInputStream) in).getAllBlocks();
+    return getDFSInputStream().getAllBlocks();
   }
 
   /**
@@ -66,7 +94,7 @@
    * @return The visible length of the file.
    */
   public long getVisibleLength() throws IOException {
-    return ((DFSInputStream) in).getFileLength();
+    return getDFSInputStream().getFileLength();
   }
 
   /**
@@ -76,6 +104,6 @@
    * bytes read through HdfsDataInputStream.
    */
   public synchronized DFSInputStream.ReadStatistics getReadStatistics() {
-    return ((DFSInputStream) in).getReadStatistics();
+    return getDFSInputStream().getReadStatistics();
   }
 }
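
Because the delegated methods above now unwrap a possible CryptoInputStream, callers can keep casting to HdfsDataInputStream for HDFS-specific statistics whether or not the file is encrypted. A small sketch under that assumption (the URI and path are illustrative, and the cast assumes the FileSystem is HDFS):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

    public class ReadStatisticsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);

        // Works the same for plain and encrypted files: the HDFS-specific
        // accessors reach through any CryptoInputStream to the DFSInputStream.
        try (FSDataInputStream in = fs.open(new Path("/zone/hello.txt"))) {
          byte[] buf = new byte[4096];
          while (in.read(buf) > 0) {
            // drain the file
          }
          HdfsDataInputStream hin = (HdfsDataInputStream) in;
          System.out.println("Bytes read: "
              + hin.getReadStatistics().getTotalBytesRead());
          System.out.println("Last datanode: " + hin.getCurrentDatanode());
        }
      }
    }
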
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
index adc8764..2149678 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java
@@ -18,14 +18,18 @@
 package org.apache.hadoop.hdfs.client;
 
 import java.io.IOException;
+import java.io.OutputStream;
 import java.util.EnumSet;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.crypto.CryptoOutputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 
+import com.google.common.base.Preconditions;
+
 /**
  * The Hdfs implementation of {@link FSDataOutputStream}.
  */
@@ -42,6 +46,18 @@
     this(out, stats, 0L);
   }
 
+  public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats,
+      long startPosition) throws IOException {
+    super(out, stats, startPosition);
+    Preconditions.checkArgument(out.getWrappedStream() instanceof DFSOutputStream,
+        "CryptoOutputStream should wrap a DFSOutputStream");
+  }
+
+  public HdfsDataOutputStream(CryptoOutputStream out, FileSystem.Statistics stats)
+      throws IOException {
+    this(out, stats, 0L);
+  }
+
   /**
    * Get the actual number of replicas of the current block.
    * 
@@ -55,7 +71,11 @@
    * @return the number of valid replicas of the current block
    */
   public synchronized int getCurrentBlockReplication() throws IOException {
-    return ((DFSOutputStream)getWrappedStream()).getCurrentBlockReplication();
+    OutputStream wrappedStream = getWrappedStream();
+    if (wrappedStream instanceof CryptoOutputStream) {
+      wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
+    }
+    return ((DFSOutputStream) wrappedStream).getCurrentBlockReplication();
   }
   
   /**
@@ -67,14 +87,20 @@
    * @see FSDataOutputStream#hsync()
    */
   public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
-    ((DFSOutputStream) getWrappedStream()).hsync(syncFlags);
+    OutputStream wrappedStream = getWrappedStream();
+    if (wrappedStream instanceof CryptoOutputStream) {
+      ((CryptoOutputStream) wrappedStream).flush();
+      wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
+    }
+    ((DFSOutputStream) wrappedStream).hsync(syncFlags);
   }
   
   public static enum SyncFlag {
+
     /**
-     * When doing sync to DataNodes, also update the metadata (block
-     * length) in the NameNode
+     * When doing sync to DataNodes, also update the metadata (block length) in
+     * the NameNode.
      */
     UPDATE_LENGTH;
   }
-}
\ No newline at end of file
+}
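
The hsync change above matters for encrypted files: buffered plaintext in the CryptoOutputStream is flushed (and therefore encrypted and handed to the DFSOutputStream) before the sync reaches the DataNodes. A brief sketch, assuming the target file may live inside an encryption zone and that the FileSystem is HDFS:

    import java.net.URI;
    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

    public class HsyncExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);

        try (FSDataOutputStream out = fs.create(new Path("/zone/log.txt"))) {
          out.writeBytes("record 1\n");
          // Durable sync that also updates the file length on the NameNode.
          // If the stream wraps a CryptoOutputStream, it is flushed first so
          // the synced bytes are the encrypted form of everything written.
          ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        }
      }
    }
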
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 8dbe1f7..ef0ac55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -24,6 +24,7 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
@@ -188,7 +189,8 @@
   @AtMostOnce
   public HdfsFileStatus create(String src, FsPermission masked,
       String clientName, EnumSetWritable<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize)
+      boolean createParent, short replication, long blockSize, 
+      List<CipherSuite> cipherSuites)
       throws AccessControlException, AlreadyBeingCreatedException,
       DSQuotaExceededException, FileAlreadyExistsException,
       FileNotFoundException, NSQuotaExceededException,
@@ -1267,6 +1269,31 @@
   public AclStatus getAclStatus(String src) throws IOException;
   
   /**
+   * Create an encryption zone
+   */
+  @AtMostOnce
+  public void createEncryptionZone(String src, String keyName)
+    throws IOException;
+
+  /**
+   * Get the encryption zone for a path.
+   */
+  @Idempotent
+  public EncryptionZoneWithId getEZForPath(String src)
+    throws IOException;
+
+  /**
+   * Used to implement cursor-based batched listing of {@link EncryptionZone}s.
+   *
+   * @param prevId ID of the last item in the previous batch. If there is no
+   *               previous batch, a negative value can be used.
+   * @return Batch of encryption zones.
+   */
+  @Idempotent
+  public BatchedEntries<EncryptionZoneWithId> listEncryptionZones(
+      long prevId) throws IOException;
+
+  /**
    * Set xattr of a file or directory.
    * The name must be prefixed with the namespace followed by ".". For example,
    * "user.attr".
@@ -1307,7 +1334,6 @@
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param src file or directory
-   * @param xAttrs xAttrs to get
    * @return List<XAttr> <code>XAttr</code> list
    * @throws IOException
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
new file mode 100644
index 0000000..a20e93c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZone.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A simple class for representing an encryption zone. Presently an encryption
+ * zone only has a path (the root of the encryption zone) and a key name.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class EncryptionZone {
+
+  private final String path;
+  private final String keyName;
+
+  public EncryptionZone(String path, String keyName) {
+    this.path = path;
+    this.keyName = keyName;
+  }
+
+  public String getPath() {
+    return path;
+  }
+
+  public String getKeyName() {
+    return keyName;
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(13, 31).
+      append(path).append(keyName).
+      toHashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) {
+      return false;
+    }
+    if (obj == this) {
+      return true;
+    }
+    if (obj.getClass() != getClass()) {
+      return false;
+    }
+
+    EncryptionZone rhs = (EncryptionZone) obj;
+    return new EqualsBuilder().
+      append(path, rhs.path).
+      append(keyName, rhs.keyName).
+      isEquals();
+  }
+
+  @Override
+  public String toString() {
+    return "EncryptionZone [path=" + path + ", keyName=" + keyName + "]";
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
new file mode 100644
index 0000000..ff308da
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneIterator.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.RemoteIterator;
+
+/**
+ * EncryptionZoneIterator is a remote iterator that iterates over encryption
+ * zones. It supports retrying in case of namenode failover.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class EncryptionZoneIterator implements RemoteIterator<EncryptionZone> {
+
+  private final EncryptionZoneWithIdIterator iterator;
+
+  public EncryptionZoneIterator(ClientProtocol namenode) {
+    iterator = new EncryptionZoneWithIdIterator(namenode);
+  }
+
+  @Override
+  public boolean hasNext() throws IOException {
+    return iterator.hasNext();
+  }
+
+  @Override
+  public EncryptionZone next() throws IOException {
+    EncryptionZoneWithId ezwi = iterator.next();
+    return ezwi.toEncryptionZone();
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java
new file mode 100644
index 0000000..7ed4884
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithId.java
@@ -0,0 +1,64 @@
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Internal class similar to an {@link EncryptionZone} which also holds a
+ * unique id. Used to implement batched listing of encryption zones.
+ */
+@InterfaceAudience.Private
+public class EncryptionZoneWithId extends EncryptionZone {
+
+  final long id;
+
+  public EncryptionZoneWithId(String path, String keyName, long id) {
+    super(path, keyName);
+    this.id = id;
+  }
+
+  public long getId() {
+    return id;
+  }
+
+  EncryptionZone toEncryptionZone() {
+    return new EncryptionZone(getPath(), getKeyName());
+  }
+
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder(17, 29)
+        .append(super.hashCode())
+        .append(id)
+        .toHashCode();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    if (!super.equals(o)) {
+      return false;
+    }
+
+    EncryptionZoneWithId that = (EncryptionZoneWithId) o;
+
+    if (id != that.id) {
+      return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public String toString() {
+    return "EncryptionZoneWithId [" +
+        "id=" + id +
+        ", " + super.toString() +
+        ']';
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithIdIterator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithIdIterator.java
new file mode 100644
index 0000000..78c7b62
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/EncryptionZoneWithIdIterator.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.BatchedRemoteIterator;
+
+/**
+ * Used on the client-side to iterate over the list of encryption zones
+ * stored on the namenode.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class EncryptionZoneWithIdIterator
+    extends BatchedRemoteIterator<Long, EncryptionZoneWithId> {
+
+  private final ClientProtocol namenode;
+
+  EncryptionZoneWithIdIterator(ClientProtocol namenode) {
+    super(Long.valueOf(0));
+    this.namenode = namenode;
+  }
+
+  @Override
+  public BatchedEntries<EncryptionZoneWithId> makeRequest(Long prevId)
+      throws IOException {
+    return namenode.listEncryptionZones(prevId);
+  }
+
+  @Override
+  public Long elementToPrevKey(EncryptionZoneWithId entry) {
+    return entry.getId();
+  }
+}
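
For reference, the paging contract this iterator builds on: each makeRequest call passes the id of the last zone seen, and the NameNode returns the next batch (bounded by dfs.namenode.list.encryption.zones.num.responses) plus a hasMore flag. A hand-rolled loop showing what BatchedRemoteIterator does under the hood, written against the new ClientProtocol method; the namenode proxy is assumed to be available to the caller:

    import java.io.IOException;

    import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;

    public class ManualZoneListing {
      // Pages through all encryption zones using the cursor-based RPC directly.
      static void listAllZones(ClientProtocol namenode) throws IOException {
        long prevId = 0;            // no previous batch yet
        boolean more = true;
        while (more) {
          BatchedEntries<EncryptionZoneWithId> batch =
              namenode.listEncryptionZones(prevId);
          for (int i = 0; i < batch.size(); i++) {
            EncryptionZoneWithId zone = batch.get(i);
            System.out.println(zone);
            prevId = zone.getId();  // cursor for the next request
          }
          more = batch.hasMore() && batch.size() > 0;
        }
      }
    }
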
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 0652de1..3d05639 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -21,6 +21,7 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -44,6 +45,8 @@
   private final String owner;
   private final String group;
   private final long fileId;
+
+  private final FileEncryptionInfo feInfo;
   
   // Used by dir, not including dot and dotdot. Always zero for a regular file.
   private final int childrenNum;
@@ -63,11 +66,12 @@
    * @param group the group of the path
    * @param path the local name in java UTF8 encoding the same as that in-memory
    * @param fileId the file id
+   * @param feInfo the file's encryption info
    */
   public HdfsFileStatus(long length, boolean isdir, int block_replication,
-                    long blocksize, long modification_time, long access_time,
-                    FsPermission permission, String owner, String group, 
-                    byte[] symlink, byte[] path, long fileId, int childrenNum) {
+      long blocksize, long modification_time, long access_time,
+      FsPermission permission, String owner, String group, byte[] symlink,
+    byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -85,6 +89,7 @@
     this.path = path;
     this.fileId = fileId;
     this.childrenNum = childrenNum;
+    this.feInfo = feInfo;
   }
 
   /**
@@ -238,6 +243,10 @@
     return fileId;
   }
   
+  public final FileEncryptionInfo getFileEncryptionInfo() {
+    return feInfo;
+  }
+
   public final int getChildrenNum() {
     return childrenNum;
   }
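
A rough illustration of what the new field gives callers (hypothetical helper, not from the patch): a null FileEncryptionInfo means the file is outside any encryption zone, or the status was produced for a /.reserved/raw path, where the info is deliberately omitted.

import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

class EncryptionInfoCheckSketch {
  /** True if the status describes a file stored inside an encryption zone. */
  static boolean isEncrypted(HdfsFileStatus stat) {
    FileEncryptionInfo feInfo = stat.getFileEncryptionInfo();
    return feInfo != null;
  }
}
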
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
index 6199b8e..a78b8bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
@@ -21,6 +21,7 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -51,15 +52,16 @@
    * @param path local path name in java UTF8 format 
    * @param fileId the file id
    * @param locations block locations
+   * @param feInfo file encryption info
    */
   public HdfsLocatedFileStatus(long length, boolean isdir,
       int block_replication, long blocksize, long modification_time,
       long access_time, FsPermission permission, String owner, String group,
       byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
-      int childrenNum) {
+    int childrenNum, FileEncryptionInfo feInfo) {
     super(length, isdir, block_replication, blocksize, modification_time,
-        access_time, permission, owner, group, symlink, path, fileId,
-        childrenNum);
+      access_time, permission, owner, group, symlink, path, fileId,
+      childrenNum, feInfo);
     this.locations = locations;
   }
 	
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
index bac0e6a..436fa14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
@@ -23,6 +23,7 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 
 /**
  * Collection of blocks with their locations and the file length.
@@ -35,22 +36,23 @@
   private final boolean underConstruction;
   private LocatedBlock lastLocatedBlock = null;
   private boolean isLastBlockComplete = false;
+  private FileEncryptionInfo fileEncryptionInfo = null;
 
   public LocatedBlocks() {
     fileLength = 0;
     blocks = null;
     underConstruction = false;
   }
-  
-  /** public Constructor */
+
   public LocatedBlocks(long flength, boolean isUnderConstuction,
-      List<LocatedBlock> blks, 
-      LocatedBlock lastBlock, boolean isLastBlockCompleted) {
+    List<LocatedBlock> blks, LocatedBlock lastBlock,
+    boolean isLastBlockCompleted, FileEncryptionInfo feInfo) {
     fileLength = flength;
     blocks = blks;
     underConstruction = isUnderConstuction;
     this.lastLocatedBlock = lastBlock;
     this.isLastBlockComplete = isLastBlockCompleted;
+    this.fileEncryptionInfo = feInfo;
   }
   
   /**
@@ -92,13 +94,20 @@
   }
 
   /**
-   * Return ture if file was under construction when 
-   * this LocatedBlocks was constructed, false otherwise.
+   * Return true if file was under construction when this LocatedBlocks was
+   * constructed, false otherwise.
    */
   public boolean isUnderConstruction() {
     return underConstruction;
   }
-  
+
+  /**
+   * @return the FileEncryptionInfo for the LocatedBlocks
+   */
+  public FileEncryptionInfo getFileEncryptionInfo() {
+    return fileEncryptionInfo;
+  }
+
   /**
    * Find block containing specified offset.
    * 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index 959439b..d395283 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -61,7 +61,7 @@
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
         access_time, permission, owner, group, null, localName, inodeId,
-        childrenNum);
+        childrenNum, null);
     this.snapshotNumber = snapshotNumber;
     this.snapshotQuota = snapshotQuota;
     this.parentFullPath = parentFullPath;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index c4211b1..40dd8f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -176,6 +177,12 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
@@ -376,7 +383,8 @@
       HdfsFileStatus result = server.create(req.getSrc(),
           PBHelper.convert(req.getMasked()), req.getClientName(),
           PBHelper.convertCreateFlag(req.getCreateFlag()), req.getCreateParent(),
-          (short) req.getReplication(), req.getBlockSize());
+          (short) req.getReplication(), req.getBlockSize(), 
+          PBHelper.convertCipherSuiteProtos(req.getCipherSuitesList()));
 
       if (result != null) {
         return CreateResponseProto.newBuilder().setFs(PBHelper.convert(result))
@@ -1301,6 +1309,52 @@
   }
   
   @Override
+  public CreateEncryptionZoneResponseProto createEncryptionZone(
+    RpcController controller, CreateEncryptionZoneRequestProto req)
+    throws ServiceException {
+    try {
+      server.createEncryptionZone(req.getSrc(), req.getKeyName());
+      return CreateEncryptionZoneResponseProto.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public GetEZForPathResponseProto getEZForPath(
+      RpcController controller, GetEZForPathRequestProto req)
+      throws ServiceException {
+    try {
+      GetEZForPathResponseProto.Builder builder =
+          GetEZForPathResponseProto.newBuilder();
+      final EncryptionZoneWithId ret = server.getEZForPath(req.getSrc());
+      builder.setZone(PBHelper.convert(ret));
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public ListEncryptionZonesResponseProto listEncryptionZones(
+    RpcController controller, ListEncryptionZonesRequestProto req)
+    throws ServiceException {
+    try {
+      BatchedEntries<EncryptionZoneWithId> entries = server
+          .listEncryptionZones(req.getId());
+      ListEncryptionZonesResponseProto.Builder builder =
+          ListEncryptionZonesResponseProto.newBuilder();
+      builder.setHasMore(entries.hasMore());
+      for (int i=0; i<entries.size(); i++) {
+        builder.addZones(PBHelper.convert(entries.get(i)));
+      }
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public SetXAttrResponseProto setXAttr(RpcController controller,
       SetXAttrRequestProto req) throws ServiceException {
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 85dbb7d..210828d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -24,8 +24,10 @@
 import java.util.EnumSet;
 import java.util.List;
 
+import com.google.common.collect.Lists;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
@@ -52,6 +54,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -146,6 +149,10 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
@@ -172,6 +179,11 @@
 import com.google.protobuf.ByteString;
 import com.google.protobuf.ServiceException;
 
+
+import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
+import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
+    .EncryptionZoneWithIdProto;
+
 /**
  * This class forwards NN's ClientProtocol calls as RPC calls to the NN server
  * while translating from the parameter types used in ClientProtocol to the
@@ -249,21 +261,25 @@
   @Override
   public HdfsFileStatus create(String src, FsPermission masked,
       String clientName, EnumSetWritable<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize)
+      boolean createParent, short replication, long blockSize, 
+      List<CipherSuite> cipherSuites)
       throws AccessControlException, AlreadyBeingCreatedException,
       DSQuotaExceededException, FileAlreadyExistsException,
       FileNotFoundException, NSQuotaExceededException,
       ParentNotDirectoryException, SafeModeException, UnresolvedLinkException,
       IOException {
-    CreateRequestProto req = CreateRequestProto.newBuilder()
+    CreateRequestProto.Builder builder = CreateRequestProto.newBuilder()
         .setSrc(src)
         .setMasked(PBHelper.convert(masked))
         .setClientName(clientName)
         .setCreateFlag(PBHelper.convertCreateFlag(flag))
         .setCreateParent(createParent)
         .setReplication(replication)
-        .setBlockSize(blockSize)
-        .build();
+        .setBlockSize(blockSize);
+    if (cipherSuites != null) {
+      builder.addAllCipherSuites(PBHelper.convertCipherSuites(cipherSuites));
+    }
+    CreateRequestProto req = builder.build();
     try {
       CreateResponseProto res = rpcProxy.create(null, req);
       return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
@@ -1292,7 +1308,62 @@
       throw ProtobufHelper.getRemoteException(e);
     }
   }
-  
+
+  @Override
+  public void createEncryptionZone(String src, String keyName)
+    throws IOException {
+    final CreateEncryptionZoneRequestProto.Builder builder =
+      CreateEncryptionZoneRequestProto.newBuilder();
+    builder.setSrc(src);
+    if (keyName != null && !keyName.isEmpty()) {
+      builder.setKeyName(keyName);
+    }
+    CreateEncryptionZoneRequestProto req = builder.build();
+    try {
+      rpcProxy.createEncryptionZone(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public EncryptionZoneWithId getEZForPath(String src)
+      throws IOException {
+    final GetEZForPathRequestProto.Builder builder =
+        GetEZForPathRequestProto.newBuilder();
+    builder.setSrc(src);
+    final GetEZForPathRequestProto req = builder.build();
+    try {
+      final EncryptionZonesProtos.GetEZForPathResponseProto response =
+          rpcProxy.getEZForPath(null, req);
+      return PBHelper.convert(response.getZone());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public BatchedEntries<EncryptionZoneWithId> listEncryptionZones(long id)
+      throws IOException {
+    final ListEncryptionZonesRequestProto req =
+      ListEncryptionZonesRequestProto.newBuilder()
+          .setId(id)
+          .build();
+    try {
+      EncryptionZonesProtos.ListEncryptionZonesResponseProto response =
+          rpcProxy.listEncryptionZones(null, req);
+      List<EncryptionZoneWithId> elements =
+          Lists.newArrayListWithCapacity(response.getZonesCount());
+      for (EncryptionZoneWithIdProto p : response.getZonesList()) {
+        elements.add(PBHelper.convert(p));
+      }
+      return new BatchedListEntries<EncryptionZoneWithId>(elements,
+          response.getHasMore());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
   @Override
   public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
       throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 7e98a88..4dcac39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.protocolPB;
 
 import static com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
+    .EncryptionZoneWithIdProto;
 
 import java.io.EOFException;
 import java.io.IOException;
@@ -51,6 +53,7 @@
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolStats;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -58,7 +61,9 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdfs.protocol.FsAclPermission;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -1178,7 +1183,9 @@
         lb.getFileLength(), lb.getUnderConstruction(),
         PBHelper.convertLocatedBlock(lb.getBlocksList()),
         lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
-        lb.getIsLastBlockComplete());
+        lb.getIsLastBlockComplete(),
+        lb.hasFileEncryptionInfo() ? convert(lb.getFileEncryptionInfo()) :
+            null);
   }
   
   public static LocatedBlocksProto convert(LocatedBlocks lb) {
@@ -1190,6 +1197,9 @@
     if (lb.getLastLocatedBlock() != null) {
       builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
     }
+    if (lb.getFileEncryptionInfo() != null) {
+      builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo()));
+    }
     return builder.setFileLength(lb.getFileLength())
         .setUnderConstruction(lb.isUnderConstruction())
         .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
@@ -1315,7 +1325,9 @@
         fs.getPath().toByteArray(),
         fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
         fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
-        fs.hasChildrenNum() ? fs.getChildrenNum() : -1);
+        fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
+        fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) :
+            null);
   }
 
   public static SnapshottableDirectoryStatus convert(
@@ -1365,6 +1377,9 @@
     if (fs.isSymlink())  {
       builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
     }
+    if (fs.getFileEncryptionInfo() != null) {
+      builder.setFileEncryptionInfo(convert(fs.getFileEncryptionInfo()));
+    }
     if (fs instanceof HdfsLocatedFileStatus) {
       LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations();
       if (locations != null) {
@@ -2253,7 +2268,7 @@
     }
     return xAttrs;
   }
-  
+
   public static List<XAttr> convert(GetXAttrsResponseProto a) {
     List<XAttrProto> xAttrs = a.getXAttrsList();
     return convertXAttrs(xAttrs);
@@ -2284,6 +2299,18 @@
     return builder.build();
   }
 
+  public static EncryptionZoneWithIdProto convert(EncryptionZoneWithId zone) {
+    return EncryptionZoneWithIdProto.newBuilder()
+        .setId(zone.getId())
+        .setKeyName(zone.getKeyName())
+        .setPath(zone.getPath()).build();
+  }
+
+  public static EncryptionZoneWithId convert(EncryptionZoneWithIdProto proto) {
+    return new EncryptionZoneWithId(proto.getPath(), proto.getKeyName(),
+        proto.getId());
+  }
+
   public static ShortCircuitShmSlotProto convert(SlotId slotId) {
     return ShortCircuitShmSlotProto.newBuilder().
         setShmId(convert(slotId.getShmId())).
@@ -2307,5 +2334,75 @@
   public static ShmId convert(ShortCircuitShmIdProto shmId) {
     return new ShmId(shmId.getHi(), shmId.getLo());
   }
-}
 
+  public static HdfsProtos.CipherSuite convert(CipherSuite suite) {
+    switch (suite) {
+    case UNKNOWN:
+      return HdfsProtos.CipherSuite.UNKNOWN;
+    case AES_CTR_NOPADDING:
+      return HdfsProtos.CipherSuite.AES_CTR_NOPADDING;
+    default:
+      return null;
+    }
+  }
+
+  public static CipherSuite convert(HdfsProtos.CipherSuite proto) {
+    switch (proto) {
+    case AES_CTR_NOPADDING:
+      return CipherSuite.AES_CTR_NOPADDING;
+    default:
+      // Set to UNKNOWN and stash the unknown enum value
+      CipherSuite suite = CipherSuite.UNKNOWN;
+      suite.setUnknownValue(proto.getNumber());
+      return suite;
+    }
+  }
+
+  public static List<HdfsProtos.CipherSuite> convertCipherSuites(
+      List<CipherSuite> suites) {
+    if (suites == null) {
+      return null;
+    }
+    List<HdfsProtos.CipherSuite> protos =
+        Lists.newArrayListWithCapacity(suites.size());
+    for (CipherSuite suite : suites) {
+      protos.add(convert(suite));
+    }
+    return protos;
+  }
+
+  public static List<CipherSuite> convertCipherSuiteProtos(
+      List<HdfsProtos.CipherSuite> protos) {
+    List<CipherSuite> suites = Lists.newArrayListWithCapacity(protos.size());
+    for (HdfsProtos.CipherSuite proto : protos) {
+      suites.add(convert(proto));
+    }
+    return suites;
+  }
+
+  public static HdfsProtos.FileEncryptionInfoProto convert(
+      FileEncryptionInfo info) {
+    if (info == null) {
+      return null;
+    }
+    return HdfsProtos.FileEncryptionInfoProto.newBuilder()
+        .setSuite(convert(info.getCipherSuite()))
+        .setKey(getByteString(info.getEncryptedDataEncryptionKey()))
+        .setIv(getByteString(info.getIV()))
+        .setEzKeyVersionName(info.getEzKeyVersionName())
+        .build();
+  }
+
+  public static FileEncryptionInfo convert(
+      HdfsProtos.FileEncryptionInfoProto proto) {
+    if (proto == null) {
+      return null;
+    }
+    CipherSuite suite = convert(proto.getSuite());
+    byte[] key = proto.getKey().toByteArray();
+    byte[] iv = proto.getIv().toByteArray();
+    String ezKeyVersionName = proto.getEzKeyVersionName();
+    return new FileEncryptionInfo(suite, key, iv, ezKeyVersionName);
+  }
+
+}
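
The CipherSuite and FileEncryptionInfo converters above use the same protobuf encoding that the namenode later stores in the raw.hdfs.crypto.file.encryption.info xattr. A round-trip sketch (illustrative only; the helper class is hypothetical):

import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;

class FileEncryptionInfoCodecSketch {
  /** Serialize a FileEncryptionInfo to bytes, as FSDirectory does for the xattr. */
  static byte[] encode(FileEncryptionInfo info) {
    return PBHelper.convert(info).toByteArray();
  }

  /** Parse the bytes back; inverse of encode(). */
  static FileEncryptionInfo decode(byte[] bytes)
      throws InvalidProtocolBufferException {
    HdfsProtos.FileEncryptionInfoProto proto =
        HdfsProtos.FileEncryptionInfoProto.parseFrom(bytes);
    return PBHelper.convert(proto);
  }
}
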
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6fdec77..8470680 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -52,6 +52,8 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
@@ -839,14 +841,15 @@
   public LocatedBlocks createLocatedBlocks(final BlockInfo[] blocks,
       final long fileSizeExcludeBlocksUnderConstruction,
       final boolean isFileUnderConstruction, final long offset,
-      final long length, final boolean needBlockToken, final boolean inSnapshot)
+      final long length, final boolean needBlockToken,
+      final boolean inSnapshot, FileEncryptionInfo feInfo)
       throws IOException {
     assert namesystem.hasReadLock();
     if (blocks == null) {
       return null;
     } else if (blocks.length == 0) {
       return new LocatedBlocks(0, isFileUnderConstruction,
-          Collections.<LocatedBlock>emptyList(), null, false);
+          Collections.<LocatedBlock>emptyList(), null, false, feInfo);
     } else {
       if (LOG.isDebugEnabled()) {
         LOG.debug("blocks = " + java.util.Arrays.asList(blocks));
@@ -871,7 +874,7 @@
       }
       return new LocatedBlocks(
           fileSizeExcludeBlocksUnderConstruction, isFileUnderConstruction,
-          locatedblocks, lastlb, isComplete);
+          locatedblocks, lastlb, isComplete, feInfo);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 8c63cc4..98c6398 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -294,5 +294,10 @@
   
   public static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
   public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;
+
+  public static final String CRYPTO_XATTR_ENCRYPTION_ZONE =
+      "raw.hdfs.crypto.encryption.zone";
+  public static final String CRYPTO_XATTR_FILE_ENCRYPTION_INFO =
+      "raw.hdfs.crypto.file.encryption.info";
 }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
new file mode 100644
index 0000000..2e65a89
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionFaultInjector.java
@@ -0,0 +1,22 @@
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Used to inject certain faults for testing.
+ */
+public class EncryptionFaultInjector {
+  @VisibleForTesting
+  public static EncryptionFaultInjector instance =
+      new EncryptionFaultInjector();
+
+  @VisibleForTesting
+  public static EncryptionFaultInjector getInstance() {
+    return instance;
+  }
+
+  @VisibleForTesting
+  public void startFileAfterGenerateKey() throws IOException {}
+}
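
The class above follows Hadoop's usual fault-injection pattern: a static, test-visible instance with no-op hooks. Judging by the method name, the hook is meant to fire after an encryption key has been generated during file creation; a test presumably swaps in a subclass along these lines (hypothetical, for illustration only):

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;

class FailAfterKeyGenerationSketch extends EncryptionFaultInjector {
  @Override
  public void startFileAfterGenerateKey() throws IOException {
    throw new IOException("injected failure after EDEK generation");
  }
}

// In a test: EncryptionFaultInjector.instance = new FailAfterKeyGenerationSketch();
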
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
new file mode 100644
index 0000000..e45d540
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -0,0 +1,296 @@
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
+import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants
+    .CRYPTO_XATTR_ENCRYPTION_ZONE;
+
+/**
+ * Manages the list of encryption zones in the filesystem.
+ * <p/>
+ * The EncryptionZoneManager has its own lock, but relies on the FSDirectory
+ * lock being held for many operations. The FSDirectory lock should not be
+ * taken if the manager lock is already held.
+ */
+public class EncryptionZoneManager {
+
+  public static Logger LOG =
+      LoggerFactory.getLogger(EncryptionZoneManager.class);
+
+  private static final EncryptionZoneWithId NULL_EZ =
+      new EncryptionZoneWithId("", "", -1);
+
+  /**
+   * EncryptionZoneInt is the internal representation of an encryption zone. The
+   * external representation of an EZ is embodied in an EncryptionZone and
+   * contains the EZ's pathname.
+   */
+  private static class EncryptionZoneInt {
+    private final String keyName;
+    private final long inodeId;
+
+    EncryptionZoneInt(long inodeId, String keyName) {
+      this.keyName = keyName;
+      this.inodeId = inodeId;
+    }
+
+    String getKeyName() {
+      return keyName;
+    }
+
+    long getINodeId() {
+      return inodeId;
+    }
+  }
+
+  private final TreeMap<Long, EncryptionZoneInt> encryptionZones;
+  private final FSDirectory dir;
+  private final int maxListEncryptionZonesResponses;
+
+  /**
+   * Construct a new EncryptionZoneManager.
+   *
+   * @param dir Enclosing FSDirectory
+   */
+  public EncryptionZoneManager(FSDirectory dir, Configuration conf) {
+    this.dir = dir;
+    encryptionZones = new TreeMap<Long, EncryptionZoneInt>();
+    maxListEncryptionZonesResponses = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
+        DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT
+    );
+    Preconditions.checkArgument(maxListEncryptionZonesResponses >= 0,
+        DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES + " " +
+            "must be a positive integer."
+    );
+  }
+
+  /**
+   * Add a new encryption zone.
+   * <p/>
+   * Called while holding the FSDirectory lock.
+   *
+   * @param inodeId of the encryption zone
+   * @param keyName encryption zone key name
+   */
+  void addEncryptionZone(Long inodeId, String keyName) {
+    assert dir.hasWriteLock();
+    final EncryptionZoneInt ez = new EncryptionZoneInt(inodeId, keyName);
+    encryptionZones.put(inodeId, ez);
+  }
+
+  /**
+   * Remove an encryption zone.
+   * <p/>
+   * Called while holding the FSDirectory lock.
+   */
+  void removeEncryptionZone(Long inodeId) {
+    assert dir.hasWriteLock();
+    encryptionZones.remove(inodeId);
+  }
+
+  /**
+   * Returns true if an IIP is within an encryption zone.
+   * <p/>
+   * Called while holding the FSDirectory lock.
+   */
+  boolean isInAnEZ(INodesInPath iip)
+      throws UnresolvedLinkException, SnapshotAccessControlException {
+    assert dir.hasReadLock();
+    return (getEncryptionZoneForPath(iip) != null);
+  }
+
+  /**
+   * Returns the path of the EncryptionZoneInt.
+   * <p/>
+   * Called while holding the FSDirectory lock.
+   */
+  private String getFullPathName(EncryptionZoneInt ezi) {
+    assert dir.hasReadLock();
+    return dir.getInode(ezi.getINodeId()).getFullPathName();
+  }
+
+  /**
+   * Get the key name for an encryption zone. Returns null if <tt>iip</tt> is
+   * not within an encryption zone.
+   * <p/>
+   * Called while holding the FSDirectory lock.
+   */
+  String getKeyName(final INodesInPath iip) {
+    assert dir.hasReadLock();
+    EncryptionZoneInt ezi = getEncryptionZoneForPath(iip);
+    if (ezi == null) {
+      return null;
+    }
+    return ezi.getKeyName();
+  }
+
+  /**
+   * Looks up the EncryptionZoneInt for a path within an encryption zone.
+   * Returns null if path is not within an EZ.
+   * <p/>
+   * Must be called while holding the FSDirectory lock.
+   */
+  private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) {
+    assert dir.hasReadLock();
+    Preconditions.checkNotNull(iip);
+    final INode[] inodes = iip.getINodes();
+    for (int i = inodes.length - 1; i >= 0; i--) {
+      final INode inode = inodes[i];
+      if (inode != null) {
+        final EncryptionZoneInt ezi = encryptionZones.get(inode.getId());
+        if (ezi != null) {
+          return ezi;
+        }
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Returns an EncryptionZoneWithId representing the ez for a given path.
+   * Returns an empty marker EncryptionZoneWithId if path is not in an ez.
+   *
+   * @param iip The INodesInPath of the path to check
+   * @return the EncryptionZoneWithId representing the ez for the path.
+   */
+  EncryptionZoneWithId getEZINodeForPath(INodesInPath iip) {
+    final EncryptionZoneInt ezi = getEncryptionZoneForPath(iip);
+    if (ezi == null) {
+      return NULL_EZ;
+    } else {
+      return new EncryptionZoneWithId(getFullPathName(ezi), ezi.getKeyName(),
+          ezi.getINodeId());
+    }
+  }
+
+  /**
+   * Throws an exception if the provided path cannot be renamed into the
+   * destination because of differing encryption zones.
+   * <p/>
+   * Called while holding the FSDirectory lock.
+   *
+   * @param srcIIP source IIP
+   * @param dstIIP destination IIP
+   * @param src    source path, used for debugging
+   * @throws IOException if the src cannot be renamed to the dst
+   */
+  void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src)
+      throws IOException {
+    assert dir.hasReadLock();
+    final EncryptionZoneInt srcEZI = getEncryptionZoneForPath(srcIIP);
+    final EncryptionZoneInt dstEZI = getEncryptionZoneForPath(dstIIP);
+    final boolean srcInEZ = (srcEZI != null);
+    final boolean dstInEZ = (dstEZI != null);
+    if (srcInEZ) {
+      if (!dstInEZ) {
+        throw new IOException(
+            src + " can't be moved from an encryption zone.");
+      }
+    } else {
+      if (dstInEZ) {
+        throw new IOException(
+            src + " can't be moved into an encryption zone.");
+      }
+    }
+
+    if (srcInEZ || dstInEZ) {
+      Preconditions.checkState(srcEZI != null, "couldn't find src EZ?");
+      Preconditions.checkState(dstEZI != null, "couldn't find dst EZ?");
+      if (srcEZI != dstEZI) {
+        final String srcEZPath = getFullPathName(srcEZI);
+        final String dstEZPath = getFullPathName(dstEZI);
+        final StringBuilder sb = new StringBuilder(src);
+        sb.append(" can't be moved from encryption zone ");
+        sb.append(srcEZPath);
+        sb.append(" to encryption zone ");
+        sb.append(dstEZPath);
+        sb.append(".");
+        throw new IOException(sb.toString());
+      }
+    }
+  }
+
+  /**
+   * Create a new encryption zone.
+   * <p/>
+   * Called while holding the FSDirectory lock.
+   */
+  XAttr createEncryptionZone(String src, String keyName)
+      throws IOException {
+    assert dir.hasWriteLock();
+    if (dir.isNonEmptyDirectory(src)) {
+      throw new IOException(
+          "Attempt to create an encryption zone for a non-empty directory.");
+    }
+
+    final INodesInPath srcIIP = dir.getINodesInPath4Write(src, false);
+    if (srcIIP != null &&
+        srcIIP.getLastINode() != null &&
+        !srcIIP.getLastINode().isDirectory()) {
+      throw new IOException("Attempt to create an encryption zone for a file.");
+    }
+    EncryptionZoneInt ezi = getEncryptionZoneForPath(srcIIP);
+    if (ezi != null) {
+      throw new IOException("Directory " + src + " is already in an " +
+          "encryption zone. (" + getFullPathName(ezi) + ")");
+    }
+
+    final XAttr ezXAttr = XAttrHelper
+        .buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, keyName.getBytes());
+
+    final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
+    xattrs.add(ezXAttr);
+    // updating the xattr will call addEncryptionZone,
+    // done this way to handle edit log loading
+    dir.unprotectedSetXAttrs(src, xattrs, EnumSet.of(XAttrSetFlag.CREATE));
+    return ezXAttr;
+  }
+
+  /**
+   * Cursor-based listing of encryption zones.
+   * <p/>
+   * Called while holding the FSDirectory lock.
+   */
+  BatchedListEntries<EncryptionZoneWithId> listEncryptionZones(long prevId)
+      throws IOException {
+    assert dir.hasReadLock();
+    NavigableMap<Long, EncryptionZoneInt> tailMap =
+        encryptionZones.tailMap(prevId, false);
+    final int numResponses = Math.min(maxListEncryptionZonesResponses,
+        tailMap.size());
+    final List<EncryptionZoneWithId> zones =
+        Lists.newArrayListWithExpectedSize(numResponses);
+
+    int count = 0;
+    for (EncryptionZoneInt ezi : tailMap.values()) {
+      zones.add(new EncryptionZoneWithId(getFullPathName(ezi),
+          ezi.getKeyName(), ezi.getINodeId()));
+      count++;
+      if (count >= numResponses) {
+        break;
+      }
+    }
+    final boolean hasMore = (numResponses < tailMap.size());
+    return new BatchedListEntries<EncryptionZoneWithId>(zones, hasMore);
+  }
+}
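
A sketch of the cursor contract behind listEncryptionZones(prevId) (illustrative only; the wrapper class is hypothetical and sits in the namenode package because FSDirectory#listEncryptionZones is package-private): start at 0 and feed the inode id of the last zone in each batch back in as the next cursor, which is exactly what EncryptionZoneWithIdIterator#elementToPrevKey does on the client side.

package org.apache.hadoop.hdfs.server.namenode;

import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;

class EncryptionZoneCursorSketch {
  /** Visit every encryption zone, one server-side batch at a time. */
  static void walkZones(FSDirectory dir) throws IOException {
    long prevId = 0;  // inode ids are positive, so 0 precedes every zone
    BatchedListEntries<EncryptionZoneWithId> batch;
    do {
      batch = dir.listEncryptionZones(prevId);  // takes the FSDirectory read lock itself
      for (int i = 0; i < batch.size(); i++) {
        EncryptionZoneWithId zone = batch.get(i);
        prevId = zone.getId();  // last id becomes the next cursor
      }
    } while (batch.hasMore());
  }
}
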
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 7780525..d231462 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_ENCRYPTION_ZONE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.CRYPTO_XATTR_FILE_ENCRYPTION_INFO;
 import static org.apache.hadoop.util.Time.now;
 
 import java.io.Closeable;
@@ -29,11 +32,13 @@
 import java.util.ListIterator;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
@@ -49,10 +54,12 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
 import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
 import org.apache.hadoop.hdfs.protocol.FsAclPermission;
@@ -64,6 +71,8 @@
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -112,9 +121,14 @@
       + DOT_RESERVED_STRING;
   public final static byte[] DOT_RESERVED = 
       DFSUtil.string2Bytes(DOT_RESERVED_STRING);
+  private final static String RAW_STRING = "raw";
+  private final static byte[] RAW = DFSUtil.string2Bytes(RAW_STRING);
   public final static String DOT_INODES_STRING = ".inodes";
   public final static byte[] DOT_INODES = 
       DFSUtil.string2Bytes(DOT_INODES_STRING);
+  private final XAttr KEYID_XATTR =
+      XAttrHelper.buildXAttr(CRYPTO_XATTR_ENCRYPTION_ZONE, null);
+
   INodeDirectory rootDir;
   private final FSNamesystem namesystem;
   private volatile boolean skipQuotaCheck = false; //skip while consuming edits
@@ -151,7 +165,7 @@
   }
 
   boolean hasReadLock() {
-    return this.dirLock.getReadHoldCount() > 0;
+    return this.dirLock.getReadHoldCount() > 0 || hasWriteLock();
   }
 
   public int getReadHoldCount() {
@@ -162,6 +176,9 @@
     return this.dirLock.getWriteHoldCount();
   }
 
+  @VisibleForTesting
+  public final EncryptionZoneManager ezManager;
+
   /**
    * Caches frequently used file names used in {@link INode} to reuse 
    * byte[] objects and reduce heap usage.
@@ -190,6 +207,7 @@
     this.inodeXAttrsLimit = conf.getInt(
         DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY,
         DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT);
+
     Preconditions.checkArgument(this.inodeXAttrsLimit >= 0,
         "Cannot set a negative limit on the number of xattrs per inode (%s).",
         DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY);
@@ -209,6 +227,8 @@
         + " times");
     nameCache = new NameCache<ByteArray>(threshold);
     namesystem = ns;
+
+    ezManager = new EncryptionZoneManager(this, conf);
   }
     
   private FSNamesystem getFSNamesystem() {
@@ -506,6 +526,7 @@
       return false;
     }
     
+    ezManager.checkMoveValidity(srcIIP, dstIIP, src);
     // Ensure dst has quota to accommodate rename
     verifyFsLimitsForRename(srcIIP, dstIIP);
     verifyQuotaForRename(srcIIP.getINodes(), dstIIP.getINodes());
@@ -584,6 +605,7 @@
       throw new IOException(error);
     }
 
+    ezManager.checkMoveValidity(srcIIP, dstIIP, src);
     final INode dstInode = dstIIP.getLastINode();
     List<INodeDirectory> snapshottableDirs = new ArrayList<INodeDirectory>();
     if (dstInode != null) { // Destination exists
@@ -1289,6 +1311,7 @@
   DirectoryListing getListing(String src, byte[] startAfter,
       boolean needLocation) throws UnresolvedLinkException, IOException {
     String srcs = normalizePath(src);
+    final boolean isRawPath = isReservedRawName(src);
 
     readLock();
     try {
@@ -1304,7 +1327,7 @@
       if (!targetNode.isDirectory()) {
         return new DirectoryListing(
             new HdfsFileStatus[]{createFileStatus(HdfsFileStatus.EMPTY_NAME,
-                targetNode, needLocation, snapshot)}, 0);
+                targetNode, needLocation, snapshot, isRawPath)}, 0);
       }
 
       final INodeDirectory dirInode = targetNode.asDirectory();
@@ -1318,7 +1341,7 @@
       for (int i=0; i<numOfListing && locationBudget>0; i++) {
         INode cur = contents.get(startChild+i);
         listing[i] = createFileStatus(cur.getLocalNameBytes(), cur,
-            needLocation, snapshot);
+            needLocation, snapshot, isRawPath);
         listingCnt++;
         if (needLocation) {
             // Once we  hit lsLimit locations, stop.
@@ -1369,7 +1392,7 @@
     for (int i = 0; i < numOfListing; i++) {
       Root sRoot = snapshots.get(i + skipSize).getRoot();
       listing[i] = createFileStatus(sRoot.getLocalNameBytes(), sRoot,
-          Snapshot.CURRENT_STATE_ID);
+          Snapshot.CURRENT_STATE_ID, false);
     }
     return new DirectoryListing(
         listing, snapshots.size() - skipSize - numOfListing);
@@ -1377,12 +1400,13 @@
 
   /** Get the file info for a specific file.
    * @param src The string representation of the path to the file
-   * @param resolveLink whether to throw UnresolvedLinkException 
+   * @param resolveLink whether to throw UnresolvedLinkException
+   * @param isRawPath true if a /.reserved/raw pathname was passed by the user
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  HdfsFileStatus getFileInfo(String src, boolean resolveLink) 
-      throws UnresolvedLinkException {
+  HdfsFileStatus getFileInfo(String src, boolean resolveLink, boolean isRawPath)
+    throws IOException {
     String srcs = normalizePath(src);
     readLock();
     try {
@@ -1391,8 +1415,9 @@
       }
       final INodesInPath inodesInPath = getLastINodeInPath(srcs, resolveLink);
       final INode i = inodesInPath.getINode(0);
+
       return i == null? null: createFileStatus(HdfsFileStatus.EMPTY_NAME, i,
-          inodesInPath.getPathSnapshotId());
+          inodesInPath.getPathSnapshotId(), isRawPath);
     } finally {
       readUnlock();
     }
@@ -1409,7 +1434,7 @@
       throws UnresolvedLinkException {
     if (getINode4DotSnapshot(src) != null) {
       return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
-          HdfsFileStatus.EMPTY_NAME, -1L, 0);
+          HdfsFileStatus.EMPTY_NAME, -1L, 0, null);
     }
     return null;
   }
@@ -2037,6 +2062,19 @@
   public final void addToInodeMap(INode inode) {
     if (inode instanceof INodeWithAdditionalFields) {
       inodeMap.put(inode);
+      if (!inode.isSymlink()) {
+        final XAttrFeature xaf = inode.getXAttrFeature();
+        if (xaf != null) {
+          final List<XAttr> xattrs = xaf.getXAttrs();
+          for (XAttr xattr : xattrs) {
+            final String xaName = XAttrHelper.getPrefixName(xattr);
+            if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) {
+              ezManager.addEncryptionZone(inode.getId(),
+                  new String(xattr.getValue()));
+            }
+          }
+        }
+      }
     }
   }
   
@@ -2048,6 +2086,7 @@
       for (INode inode : inodes) {
         if (inode != null && inode instanceof INodeWithAdditionalFields) {
           inodeMap.remove(inode);
+          ezManager.removeEncryptionZone(inode.getId());
         }
       }
     }
@@ -2217,22 +2256,25 @@
    * @param path the local name
    * @param node inode
    * @param needLocation if block locations need to be included or not
+   * @param isRawPath true if this is being called on behalf of a path in
+   *                  /.reserved/raw
    * @return a file status
    * @throws IOException if any error occurs
    */
   private HdfsFileStatus createFileStatus(byte[] path, INode node,
-      boolean needLocation, int snapshot) throws IOException {
+      boolean needLocation, int snapshot, boolean isRawPath)
+      throws IOException {
     if (needLocation) {
-      return createLocatedFileStatus(path, node, snapshot);
+      return createLocatedFileStatus(path, node, snapshot, isRawPath);
     } else {
-      return createFileStatus(path, node, snapshot);
+      return createFileStatus(path, node, snapshot, isRawPath);
     }
   }
   /**
    * Create FileStatus by file INode 
    */
    HdfsFileStatus createFileStatus(byte[] path, INode node,
-       int snapshot) {
+       int snapshot, boolean isRawPath) throws IOException {
      long size = 0;     // length is zero for directories
      short replication = 0;
      long blocksize = 0;
@@ -2244,7 +2286,10 @@
      }
      int childrenNum = node.isDirectory() ? 
          node.asDirectory().getChildrenNum(snapshot) : 0;
-         
+
+     FileEncryptionInfo feInfo = isRawPath ? null :
+         getFileEncryptionInfo(node, snapshot);
+
      return new HdfsFileStatus(
         size, 
         node.isDirectory(), 
@@ -2258,19 +2303,22 @@
         node.isSymlink() ? node.asSymlink().getSymlink() : null,
         path,
         node.getId(),
-        childrenNum);
+        childrenNum,
+        feInfo);
   }
 
   /**
    * Create FileStatus with location info by file INode
    */
   private HdfsLocatedFileStatus createLocatedFileStatus(byte[] path,
-      INode node, int snapshot) throws IOException {
+      INode node, int snapshot, boolean isRawPath) throws IOException {
     assert hasReadLock();
     long size = 0; // length is zero for directories
     short replication = 0;
     long blocksize = 0;
     LocatedBlocks loc = null;
+    final FileEncryptionInfo feInfo = isRawPath ? null :
+        getFileEncryptionInfo(node, snapshot);
     if (node.isFile()) {
       final INodeFile fileNode = node.asFile();
       size = fileNode.computeFileSize(snapshot);
@@ -2281,16 +2329,17 @@
       final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
       final long fileSize = !inSnapshot && isUc ? 
           fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
+
       loc = getFSNamesystem().getBlockManager().createLocatedBlocks(
           fileNode.getBlocks(), fileSize, isUc, 0L, size, false,
-          inSnapshot);
+          inSnapshot, feInfo);
       if (loc == null) {
         loc = new LocatedBlocks();
       }
     }
     int childrenNum = node.isDirectory() ? 
         node.asDirectory().getChildrenNum(snapshot) : 0;
-        
+
     HdfsLocatedFileStatus status =
         new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
           blocksize, node.getModificationTime(snapshot),
@@ -2298,7 +2347,7 @@
           getPermissionForFileStatus(node, snapshot),
           node.getUserName(snapshot), node.getGroupName(snapshot),
           node.isSymlink() ? node.asSymlink().getSymlink() : null, path,
-          node.getId(), loc, childrenNum);
+          node.getId(), loc, childrenNum, feInfo);
     // Set caching information for the located blocks.
     if (loc != null) {
       CacheManager cacheManager = namesystem.getCacheManager();
@@ -2545,6 +2594,8 @@
       for (ListIterator<XAttr> it = toFilter.listIterator(); it.hasNext()
           ;) {
         XAttr filter = it.next();
+        Preconditions.checkArgument(!KEYID_XATTR.equalsIgnoreValue(filter),
+            "The encryption zone xattr should never be deleted.");
         if (a.equalsIgnoreValue(filter)) {
           add = false;
           it.remove();
@@ -2559,7 +2610,111 @@
 
     return newXAttrs;
   }
-  
+
+  boolean isInAnEZ(INodesInPath iip)
+      throws UnresolvedLinkException, SnapshotAccessControlException {
+    readLock();
+    try {
+      return ezManager.isInAnEZ(iip);
+    } finally {
+      readUnlock();
+    }
+  }
+
+  String getKeyName(INodesInPath iip) {
+    readLock();
+    try {
+      return ezManager.getKeyName(iip);
+    } finally {
+      readUnlock();
+    }
+  }
+
+  XAttr createEncryptionZone(String src, String keyName)
+    throws IOException {
+    writeLock();
+    try {
+      return ezManager.createEncryptionZone(src, keyName);
+    } finally {
+      writeUnlock();
+    }
+  }
+
+  EncryptionZoneWithId getEZForPath(INodesInPath iip) {
+    readLock();
+    try {
+      return ezManager.getEZINodeForPath(iip);
+    } finally {
+      readUnlock();
+    }
+  }
+
+  BatchedListEntries<EncryptionZoneWithId> listEncryptionZones(long prevId)
+      throws IOException {
+    readLock();
+    try {
+      return ezManager.listEncryptionZones(prevId);
+    } finally {
+      readUnlock();
+    }
+  }
+
+  /**
+   * Set the FileEncryptionInfo for an INode.
+   */
+  void setFileEncryptionInfo(String src, FileEncryptionInfo info)
+      throws IOException {
+    // Make the PB for the xattr
+    final HdfsProtos.FileEncryptionInfoProto proto = PBHelper.convert(info);
+    final byte[] protoBytes = proto.toByteArray();
+    final XAttr fileEncryptionAttr =
+        XAttrHelper.buildXAttr(CRYPTO_XATTR_FILE_ENCRYPTION_INFO, protoBytes);
+    final List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+    xAttrs.add(fileEncryptionAttr);
+
+    writeLock();
+    try {
+      unprotectedSetXAttrs(src, xAttrs, EnumSet.of(XAttrSetFlag.CREATE));
+    } finally {
+      writeUnlock();
+    }
+  }
+
+  /**
+   * Return the FileEncryptionInfo for an INode, or null if the INode is not
+   * an encrypted file.
+   */
+  FileEncryptionInfo getFileEncryptionInfo(INode inode, int snapshotId)
+      throws IOException {
+    if (!inode.isFile()) {
+      return null;
+    }
+    readLock();
+    try {
+      List<XAttr> xAttrs = XAttrStorage.readINodeXAttrs(inode, snapshotId);
+      if (xAttrs == null) {
+        return null;
+      }
+      for (XAttr x : xAttrs) {
+        if (XAttrHelper.getPrefixName(x)
+            .equals(CRYPTO_XATTR_FILE_ENCRYPTION_INFO)) {
+          try {
+            HdfsProtos.FileEncryptionInfoProto proto =
+                HdfsProtos.FileEncryptionInfoProto.parseFrom(x.getValue());
+            FileEncryptionInfo feInfo = PBHelper.convert(proto);
+            return feInfo;
+          } catch (InvalidProtocolBufferException e) {
+            throw new IOException("Could not parse file encryption info for " +
+                "inode " + inode, e);
+          }
+        }
+      }
+      return null;
+    } finally {
+      readUnlock();
+    }
+  }
+
   void setXAttrs(final String src, final List<XAttr> xAttrs,
       final EnumSet<XAttrSetFlag> flag) throws IOException {
     writeLock();
@@ -2570,7 +2725,7 @@
     }
   }
   
-  void unprotectedSetXAttrs(final String src, final List<XAttr> xAttrs,
+  INode unprotectedSetXAttrs(final String src, final List<XAttr> xAttrs,
       final EnumSet<XAttrSetFlag> flag)
       throws QuotaExceededException, IOException {
     assert hasWriteLock();
@@ -2579,7 +2734,20 @@
     int snapshotId = iip.getLatestSnapshotId();
     List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);
     List<XAttr> newXAttrs = setINodeXAttrs(existingXAttrs, xAttrs, flag);
+
+    /*
+     * If we're adding the encryption zone xattr, then add src to the list
+     * of encryption zones.
+     */
+    for (XAttr xattr : newXAttrs) {
+      final String xaName = XAttrHelper.getPrefixName(xattr);
+      if (CRYPTO_XATTR_ENCRYPTION_ZONE.equals(xaName)) {
+        ezManager.addEncryptionZone(inode.getId(), new String(xattr.getValue()));
+      }
+    }
+
     XAttrStorage.updateINodeXAttrs(inode, newXAttrs, snapshotId);
+    return inode;
   }
 
   List<XAttr> setINodeXAttrs(final List<XAttr> existingXAttrs,
@@ -2736,27 +2904,73 @@
     return src.startsWith(DOT_RESERVED_PATH_PREFIX);
   }
 
+  static boolean isReservedRawName(String src) {
+    return src.startsWith(DOT_RESERVED_PATH_PREFIX +
+        Path.SEPARATOR + RAW_STRING);
+  }
+
   /**
-   * Resolve the path of /.reserved/.inodes/<inodeid>/... to a regular path
+   * Resolve a /.reserved/... path to a non-reserved path.
+   * <p/>
+   * There are two special hierarchies under /.reserved/:
+   * <p/>
+   * /.reserved/.inodes/<inodeid> performs a path lookup by inodeid,
+   * <p/>
+   * /.reserved/raw/... returns the encrypted (raw) bytes of a file in an
+   * encryption zone. For instance, if /ezone is an encryption zone, then
+   * /ezone/a refers to the decrypted file and /.reserved/raw/ezone/a refers to
+   * the encrypted (raw) bytes of /ezone/a.
+   * <p/>
+   * Pathnames in the /.reserved/raw directory that resolve to files not in an
+   * encryption zone are equivalent to the corresponding non-raw path. Hence,
+   * if /a/b/c refers to a file that is not in an encryption zone, then
+   * /.reserved/raw/a/b/c is equivalent (they both refer to the same
+   * unencrypted file).
    * 
    * @param src path that is being processed
    * @param pathComponents path components corresponding to the path
    * @param fsd FSDirectory
-   * @return if the path indicates an inode, return path after replacing upto
+   * @return if the path indicates an inode, return path after replacing up to
    *         <inodeid> with the corresponding path of the inode, else the path
-   *         in {@code src} as is.
+   *         in {@code src} as is. If the path refers to a path in the "raw"
+   *         directory, return the non-raw pathname.
    * @throws FileNotFoundException if inodeid is invalid
    */
-  static String resolvePath(String src, byte[][] pathComponents, FSDirectory fsd)
+  static String resolvePath(String src, byte[][] pathComponents,
+      FSDirectory fsd) throws FileNotFoundException {
+    final int nComponents = (pathComponents == null) ?
+        0 : pathComponents.length;
+    if (nComponents <= 2) {
+      return src;
+    }
+    if (!Arrays.equals(DOT_RESERVED, pathComponents[1])) {
+      /* This is not a /.reserved/ path so do nothing. */
+      return src;
+    }
+
+    if (Arrays.equals(DOT_INODES, pathComponents[2])) {
+      /* It's a /.reserved/.inodes path. */
+      if (nComponents > 3) {
+        return resolveDotInodesPath(src, pathComponents, fsd);
+      } else {
+        return src;
+      }
+    } else if (Arrays.equals(RAW, pathComponents[2])) {
+      /* It's /.reserved/raw so strip off the /.reserved/raw prefix. */
+      if (nComponents == 3) {
+        return Path.SEPARATOR;
+      } else {
+        return constructRemainingPath("", pathComponents, 3);
+      }
+    } else {
+      /* It's some sort of /.reserved/<unknown> path. Ignore it. */
+      return src;
+    }
+  }
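
For reference, the new resolvePath() above behaves roughly as follows; the paths and the inode mapping are illustrative only:

    // Illustrative resolutions (assumes inode 16389 happens to map to /user/alice):
    //   /.reserved/raw                     ->  /
    //   /.reserved/raw/ezone/a             ->  /ezone/a
    //   /.reserved/.inodes/16389/data.txt  ->  /user/alice/data.txt
    //   /.reserved/other/x                 ->  /.reserved/other/x   (returned as is)
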
+
+  private static String resolveDotInodesPath(String src,
+      byte[][] pathComponents, FSDirectory fsd)
       throws FileNotFoundException {
-    if (pathComponents == null || pathComponents.length <= 3) {
-      return src;
-    }
-    // Not /.reserved/.inodes
-    if (!Arrays.equals(DOT_RESERVED, pathComponents[1])
-        || !Arrays.equals(DOT_INODES, pathComponents[2])) { // Not .inodes path
-      return src;
-    }
     final String inodeId = DFSUtil.bytes2String(pathComponents[3]);
     final long id;
     try {
@@ -2785,10 +2999,20 @@
       }
     }
 
-    StringBuilder path = id == INodeId.ROOT_INODE_ID ? new StringBuilder()
-        : new StringBuilder(inode.getFullPathName());
-    for (int i = 4; i < pathComponents.length; i++) {
-      path.append(Path.SEPARATOR).append(DFSUtil.bytes2String(pathComponents[i]));
+    String path = "";
+    if (id != INodeId.ROOT_INODE_ID) {
+      path = inode.getFullPathName();
+    }
+    return constructRemainingPath(path, pathComponents, 4);
+  }
+
+  private static String constructRemainingPath(String pathPrefix,
+      byte[][] pathComponents, int startAt) {
+
+    StringBuilder path = new StringBuilder(pathPrefix);
+    for (int i = startAt; i < pathComponents.length; i++) {
+      path.append(Path.SEPARATOR).append(
+          DFSUtil.bytes2String(pathComponents[i]));
     }
     if (NameNode.LOG.isDebugEnabled()) {
       NameNode.LOG.debug("Resolved path is " + path);
@@ -2833,7 +3057,7 @@
    * @throws UnresolvedLinkException if symlink can't be resolved
    * @throws SnapshotAccessControlException if path is in RO snapshot
    */
-  private INodesInPath getINodesInPath4Write(String src, boolean resolveLink)
+  INodesInPath getINodesInPath4Write(String src, boolean resolveLink)
           throws UnresolvedLinkException, SnapshotAccessControlException {
     final byte[][] components = INode.getPathComponents(src);
     INodesInPath inodesInPath = INodesInPath.resolve(rootDir, components,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index a721491..d522e51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -364,7 +364,8 @@
         // add the op into retry cache if necessary
         if (toAddRetryCache) {
           HdfsFileStatus stat = fsNamesys.dir.createFileStatus(
-              HdfsFileStatus.EMPTY_NAME, newFile, Snapshot.CURRENT_STATE_ID);
+              HdfsFileStatus.EMPTY_NAME, newFile, Snapshot.CURRENT_STATE_ID,
+              false);
           fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
               addCloseOp.rpcCallId, stat);
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0cb90bd..81d5a22 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
+import static org.apache.hadoop.crypto.key.KeyProviderCryptoExtension
+    .EncryptedKeyVersion;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
@@ -102,6 +105,8 @@
 import java.lang.management.ManagementFactory;
 import java.net.InetAddress;
 import java.net.URI;
+import java.security.GeneralSecurityException;
+import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -115,6 +120,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
@@ -130,12 +136,17 @@
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.InvalidPathException;
@@ -159,6 +170,7 @@
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.UnknownCipherSuiteException;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -170,6 +182,8 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -325,7 +339,7 @@
   private HdfsFileStatus getAuditFileInfo(String path, boolean resolveSymlink)
       throws IOException {
     return (isAuditEnabled() && isExternalInvocation())
-        ? dir.getFileInfo(path, resolveSymlink) : null;
+        ? dir.getFileInfo(path, resolveSymlink, false) : null;
   }
   
   private void logAuditEvent(boolean succeeded, String cmd, String src)
@@ -411,6 +425,8 @@
   private final CacheManager cacheManager;
   private final DatanodeStatistics datanodeStatistics;
 
+  private String nameserviceId;
+
   private RollingUpgradeInfo rollingUpgradeInfo = null;
   /**
    * A flag that indicates whether the checkpointer should checkpoint a rollback
@@ -526,6 +542,11 @@
 
   private final NNConf nnConf;
 
+  private KeyProviderCryptoExtension provider = null;
+  private KeyProvider.Options providerOptions = null;
+
+  private final CryptoCodec codec;
+
   private volatile boolean imageLoaded = false;
   private final Condition cond;
 
@@ -745,6 +766,14 @@
    */
   FSNamesystem(Configuration conf, FSImage fsImage, boolean ignoreRetryCache)
       throws IOException {
+    provider = DFSUtil.createKeyProviderCryptoExtension(conf);
+    if (provider == null) {
+      LOG.info("No KeyProvider found.");
+    } else {
+      LOG.info("Found KeyProvider: " + provider.toString());
+    }
+    providerOptions = KeyProvider.options(conf);
+    this.codec = CryptoCodec.getInstance(conf);
     if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY,
                         DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) {
       LOG.info("Enabling async auditlog");
@@ -776,7 +805,7 @@
 
       // block allocation has to be persisted in HA using a shared edits directory
       // so that the standby has up-to-date namespace information
-      String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
+      nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
       this.haEnabled = HAUtil.isHAEnabled(conf, nameserviceId);  
       
       // Sanity check the HA-related config.
@@ -903,6 +932,11 @@
   }
 
   @VisibleForTesting
+  public KeyProviderCryptoExtension getProvider() {
+    return provider;
+  }
+
+  @VisibleForTesting
   static RetryCache initRetryCache(Configuration conf) {
     boolean enable = conf.getBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY,
         DFS_NAMENODE_ENABLE_RETRY_CACHE_DEFAULT);
@@ -1630,9 +1664,10 @@
     }
   }
 
-  private void setPermissionInt(String src, FsPermission permission)
+  private void setPermissionInt(final String srcArg, FsPermission permission)
       throws AccessControlException, FileNotFoundException, SafeModeException,
       UnresolvedLinkException, IOException {
+    String src = srcArg;
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
@@ -1641,7 +1676,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set permission for " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       dir.setPermission(src, permission);
       getEditLog().logSetPermissions(src, permission);
@@ -1650,7 +1685,7 @@
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "setPermission", src, null, resultingStat);
+    logAuditEvent(true, "setPermission", srcArg, null, resultingStat);
   }
 
   /**
@@ -1668,9 +1703,10 @@
     } 
   }
 
-  private void setOwnerInt(String src, String username, String group)
+  private void setOwnerInt(final String srcArg, String username, String group)
       throws AccessControlException, FileNotFoundException, SafeModeException,
       UnresolvedLinkException, IOException {
+    String src = srcArg;
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
@@ -1679,7 +1715,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set owner for " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       if (!pc.isSuperUser()) {
         if (username != null && !pc.getUser().equals(username)) {
@@ -1696,7 +1732,7 @@
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "setOwner", src, null, resultingStat);
+    logAuditEvent(true, "setOwner", srcArg, null, resultingStat);
   }
 
   /**
@@ -1779,10 +1815,11 @@
    * Get block locations within the specified range, updating the
    * access times if necessary. 
    */
-  private LocatedBlocks getBlockLocationsUpdateTimes(String src, long offset,
-      long length, boolean doAccessTime, boolean needBlockToken)
+  private LocatedBlocks getBlockLocationsUpdateTimes(final String srcArg,
+      long offset, long length, boolean doAccessTime, boolean needBlockToken)
       throws FileNotFoundException,
       UnresolvedLinkException, IOException {
+    String src = srcArg;
     FSPermissionChecker pc = getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     for (int attempt = 0; attempt < 2; attempt++) {
@@ -1794,7 +1831,7 @@
         checkOperation(OperationCategory.WRITE);
         writeLock(); // writelock is needed to set accesstime
       }
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       try {
         if (isReadOp) {
           checkOperation(OperationCategory.READ);
@@ -1838,9 +1875,14 @@
           length = Math.min(length, fileSize - offset);
           isUc = false;
         }
-        LocatedBlocks blocks =
+
+        final FileEncryptionInfo feInfo =
+          FSDirectory.isReservedRawName(srcArg) ?
+          null : dir.getFileEncryptionInfo(inode, iip.getPathSnapshotId());
+
+        final LocatedBlocks blocks =
           blockManager.createLocatedBlocks(inode.getBlocks(), fileSize,
-            isUc, offset, length, needBlockToken, iip.isSnapshot());
+            isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo);
         // Set caching information for the located blocks.
         for (LocatedBlock lb: blocks.getLocatedBlocks()) {
           cacheManager.setCachedLocations(lb);
@@ -2061,8 +2103,9 @@
     }
   }
 
-  private void setTimesInt(String src, long mtime, long atime) 
+  private void setTimesInt(final String srcArg, long mtime, long atime)
     throws IOException, UnresolvedLinkException {
+    String src = srcArg;
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
@@ -2071,7 +2114,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set times " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
 
       // Write access is required to set access and modification times
       if (isPermissionEnabled) {
@@ -2092,7 +2135,7 @@
     } finally {
       writeUnlock();
     }
-    logAuditEvent(true, "setTimes", src, null, resultingStat);
+    logAuditEvent(true, "setTimes", srcArg, null, resultingStat);
   }
 
   /**
@@ -2123,9 +2166,10 @@
     }
   }
 
-  private void createSymlinkInt(String target, String link,
+  private void createSymlinkInt(String target, final String linkArg,
       PermissionStatus dirPerms, boolean createParent, boolean logRetryCache) 
       throws IOException, UnresolvedLinkException {
+    String link = linkArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.createSymlink: target="
           + target + " link=" + link);
@@ -2138,7 +2182,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot create symlink " + link);
-      link = FSDirectory.resolvePath(link, pathComponents, dir);
+      link = resolvePath(link, pathComponents);
       if (!createParent) {
         verifyParentDir(link);
       }
@@ -2159,7 +2203,7 @@
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "createSymlink", link, target, resultingStat);
+    logAuditEvent(true, "createSymlink", linkArg, target, resultingStat);
   }
 
   /**
@@ -2185,8 +2229,9 @@
     }
   }
 
-  private boolean setReplicationInt(String src, final short replication)
-      throws IOException {
+  private boolean setReplicationInt(final String srcArg,
+      final short replication) throws IOException {
+    String src = srcArg;
     blockManager.verifyReplication(src, replication, null);
     final boolean isFile;
     FSPermissionChecker pc = getPermissionChecker();
@@ -2197,7 +2242,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set replication for " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       if (isPermissionEnabled) {
         checkPathAccess(pc, src, FsAction.WRITE);
       }
@@ -2215,7 +2260,7 @@
 
     getEditLog().logSync();
     if (isFile) {
-      logAuditEvent(true, "setReplication", src);
+      logAuditEvent(true, "setReplication", srcArg);
     }
     return isFile;
   }
@@ -2228,7 +2273,7 @@
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      filename = FSDirectory.resolvePath(filename, pathComponents, dir);
+      filename = resolvePath(filename, pathComponents);
       if (isPermissionEnabled) {
         checkTraverse(pc, filename);
       }
@@ -2256,7 +2301,74 @@
       }
     }
   }
-  
+
+  /**
+   * If the file is within an encryption zone, select the appropriate 
+   * CipherSuite from the list provided by the client. Since the client may 
+   * be newer, we need to handle unknown CipherSuites.
+   *
+   * @param srcIIP path of the file
+   * @param cipherSuites client-provided list of supported CipherSuites, 
+   *                     in desired order.
+   * @return chosen CipherSuite, or null if file is not in an EncryptionZone
+   * @throws IOException
+   */
+  private CipherSuite chooseCipherSuite(INodesInPath srcIIP, List<CipherSuite>
+      cipherSuites)
+      throws UnknownCipherSuiteException, UnresolvedLinkException,
+        SnapshotAccessControlException {
+    // Not in an EZ
+    if (!dir.isInAnEZ(srcIIP)) {
+      return null;
+    }
+    CipherSuite chosen = null;
+    for (CipherSuite c : cipherSuites) {
+      if (c.equals(CipherSuite.UNKNOWN)) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Ignoring unknown CipherSuite provided by client: "
+              + c.getUnknownValue());
+        }
+        continue;
+      }
+      for (CipherSuite supported : CipherSuite.values()) {
+        if (supported.equals(c)) {
+          chosen = c;
+          break;
+        }
+      }
+    }
+    if (chosen == null) {
+      throw new UnknownCipherSuiteException(
+          "No cipher suites provided by the client are supported."
+              + " Client provided: " + Arrays.toString(cipherSuites.toArray())
+              + " NameNode supports: " + Arrays.toString(CipherSuite.values()));
+    }
+    return chosen;
+  }
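
A minimal sketch of the negotiation chooseCipherSuite() implements, seen from the client side; AES_CTR_NOPADDING is assumed here to be one of the suites this build supports:

    // The client advertises suites in preference order; any suite it cannot
    // name is transmitted as CipherSuite.UNKNOWN and is skipped above.
    List<CipherSuite> fromClient =
        Arrays.asList(CipherSuite.UNKNOWN, CipherSuite.AES_CTR_NOPADDING);
    // A suite is accepted only if it matches one of CipherSuite.values() on
    // this NameNode; if none match, UnknownCipherSuiteException is thrown.
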
+
+  /**
+   * Invoke KeyProvider APIs to generate an encrypted data encryption key for an
+   * encryption zone. Should not be called with any locks held.
+   *
+   * @param ezKeyName key name of an encryption zone
+   * @return New EDEK, or null if ezKeyName is null
+   * @throws IOException
+   */
+  private EncryptedKeyVersion generateEncryptedDataEncryptionKey(String
+      ezKeyName) throws IOException {
+    if (ezKeyName == null) {
+      return null;
+    }
+    EncryptedKeyVersion edek = null;
+    try {
+      edek = provider.generateEncryptedKey(ezKeyName);
+    } catch (GeneralSecurityException e) {
+      throw new IOException(e);
+    }
+    Preconditions.checkNotNull(edek);
+    return edek;
+  }
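
For context, a sketch of how the EDEK produced here is consumed later in this patch; it assumes a KeyProviderCryptoExtension named provider is in scope, as in FSNamesystem, and the key name is a placeholder:

    EncryptedKeyVersion edek = provider.generateEncryptedKey("myEZKey");
    // The same accessors are used below in startFileInternal() to build the
    // FileEncryptionInfo that is persisted with the file.
    byte[] material = edek.getEncryptedKeyVersion().getMaterial();
    byte[] iv = edek.getEncryptedKeyIv();
    String keyVersionName = edek.getEncryptionKeyVersionName();
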
+
   /**
    * Create a new file entry in the namespace.
    * 
@@ -2266,7 +2378,8 @@
    */
   HdfsFileStatus startFile(String src, PermissionStatus permissions,
       String holder, String clientMachine, EnumSet<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize)
+      boolean createParent, short replication, long blockSize, 
+      List<CipherSuite> cipherSuites)
       throws AccessControlException, SafeModeException,
       FileAlreadyExistsException, UnresolvedLinkException,
       FileNotFoundException, ParentNotDirectoryException, IOException {
@@ -2279,7 +2392,8 @@
     
     try {
       status = startFileInt(src, permissions, holder, clientMachine, flag,
-          createParent, replication, blockSize, cacheEntry != null);
+          createParent, replication, blockSize, cipherSuites,
+          cacheEntry != null);
     } catch (AccessControlException e) {
       logAuditEvent(false, "create", src);
       throw e;
@@ -2289,19 +2403,30 @@
     return status;
   }
 
-  private HdfsFileStatus startFileInt(String src, PermissionStatus permissions,
-      String holder, String clientMachine, EnumSet<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize,
-      boolean logRetryCache) throws AccessControlException, SafeModeException,
+  private HdfsFileStatus startFileInt(final String srcArg,
+      PermissionStatus permissions, String holder, String clientMachine,
+      EnumSet<CreateFlag> flag, boolean createParent, short replication,
+      long blockSize, List<CipherSuite> cipherSuites, boolean logRetryCache)
+      throws AccessControlException, SafeModeException,
       FileAlreadyExistsException, UnresolvedLinkException,
       FileNotFoundException, ParentNotDirectoryException, IOException {
+    String src = srcArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: src=" + src
-          + ", holder=" + holder
-          + ", clientMachine=" + clientMachine
-          + ", createParent=" + createParent
-          + ", replication=" + replication
-          + ", createFlag=" + flag.toString());
+      StringBuilder builder = new StringBuilder();
+      builder.append("DIR* NameSystem.startFile: src=" + src
+              + ", holder=" + holder
+              + ", clientMachine=" + clientMachine
+              + ", createParent=" + createParent
+              + ", replication=" + replication
+              + ", createFlag=" + flag.toString()
+              + ", blockSize=" + blockSize);
+      builder.append(", cipherSuites=");
+      if (cipherSuites != null) {
+        builder.append(Arrays.toString(cipherSuites.toArray()));
+      } else {
+        builder.append("null");
+      }
+      NameNode.stateChangeLog.debug(builder.toString());
     }
     if (!DFSUtil.isValidName(src)) {
       throw new InvalidPathException(src);
@@ -2322,27 +2447,92 @@
     boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
 
     waitForLoadingFSImage();
-    writeLock();
+
+    /*
+     * We want to avoid holding any locks while doing KeyProvider operations,
+     * since they can be very slow. Since the path can
+     * flip flop between being in an encryption zone and not in the meantime,
+     * we need to recheck the preconditions and redo KeyProvider operations
+     * in some situations.
+     *
+     * A special RetryStartFileException is used to indicate that we should
+     * retry creation of a FileEncryptionInfo.
+     */
     try {
-      checkOperation(OperationCategory.WRITE);
-      checkNameNodeSafeMode("Cannot create file" + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
-      startFileInternal(pc, src, permissions, holder, clientMachine, create,
-          overwrite, createParent, replication, blockSize, logRetryCache);
-      stat = dir.getFileInfo(src, false);
-    } catch (StandbyException se) {
-      skipSync = true;
-      throw se;
+      boolean shouldContinue = true;
+      int iters = 0;
+      while (shouldContinue) {
+        skipSync = false;
+        if (iters >= 10) {
+          throw new IOException("Too many retries because of encryption zone " +
+              "operations, something might be broken!");
+        }
+        shouldContinue = false;
+        iters++;
+
+        // Optimistically determine CipherSuite and ezKeyName if the path is
+        // currently within an encryption zone
+        CipherSuite suite = null;
+        String ezKeyName = null;
+        readLock();
+        try {
+          src = resolvePath(src, pathComponents);
+          INodesInPath iip = dir.getINodesInPath4Write(src);
+          // Nothing to do if the path is not within an EZ
+          if (dir.isInAnEZ(iip)) {
+            suite = chooseCipherSuite(iip, cipherSuites);
+            if (suite != null) {
+              Preconditions.checkArgument(!suite.equals(CipherSuite.UNKNOWN),
+                  "Chose an UNKNOWN CipherSuite!");
+            }
+            ezKeyName = dir.getKeyName(iip);
+            Preconditions.checkState(ezKeyName != null);
+          }
+        } finally {
+          readUnlock();
+        }
+
+        Preconditions.checkState(
+            (suite == null && ezKeyName == null) ||
+            (suite != null && ezKeyName != null),
+            "Both suite and ezKeyName should both be null or not null");
+        // Generate EDEK if necessary while not holding the lock
+        EncryptedKeyVersion edek =
+            generateEncryptedDataEncryptionKey(ezKeyName);
+        EncryptionFaultInjector.getInstance().startFileAfterGenerateKey();
+        // Try to create the file with the computed cipher suite and EDEK
+        writeLock();
+        try {
+          checkOperation(OperationCategory.WRITE);
+          checkNameNodeSafeMode("Cannot create file" + src);
+          src = resolvePath(src, pathComponents);
+          startFileInternal(pc, src, permissions, holder, clientMachine, create,
+              overwrite, createParent, replication, blockSize, suite, edek,
+              logRetryCache);
+          stat = dir.getFileInfo(src, false,
+              FSDirectory.isReservedRawName(srcArg));
+        } catch (StandbyException se) {
+          skipSync = true;
+          throw se;
+        } catch (RetryStartFileException e) {
+          shouldContinue = true;
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("Preconditions failed, retrying creation of " +
+                    "FileEncryptionInfo", e);
+          }
+        } finally {
+          writeUnlock();
+        }
+      }
     } finally {
-      writeUnlock();
       // There might be transactions logged while trying to recover the lease.
       // They need to be sync'ed even when an exception was thrown.
       if (!skipSync) {
         getEditLog().logSync();
       }
-    } 
+    }
 
-    logAuditEvent(true, "create", src, null, stat);
+    logAuditEvent(true, "create", srcArg, null, stat);
     return stat;
   }
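
In outline, the create path above now runs as follows (a condensed restatement of the loop just shown, not additional behavior):

    // for at most 10 attempts:
    //   readLock():  resolve src; if it is in an EZ, pick a CipherSuite and
    //                record the EZ key name
    //   readUnlock()
    //   generateEncryptedDataEncryptionKey(ezKeyName)   // no lock held
    //   writeLock(): startFileInternal(..., suite, edek, ...), which throws
    //                RetryStartFileException if the EZ state changed meanwhile
    //   writeUnlock()
    // retry on RetryStartFileException; otherwise return the file status
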
 
@@ -2358,10 +2548,11 @@
   private void startFileInternal(FSPermissionChecker pc, String src,
       PermissionStatus permissions, String holder, String clientMachine,
       boolean create, boolean overwrite, boolean createParent,
-      short replication, long blockSize, boolean logRetryEntry)
+      short replication, long blockSize, CipherSuite suite,
+      EncryptedKeyVersion edek, boolean logRetryEntry)
       throws FileAlreadyExistsException, AccessControlException,
       UnresolvedLinkException, FileNotFoundException,
-      ParentNotDirectoryException, IOException {
+      ParentNotDirectoryException, RetryStartFileException, IOException {
     assert hasWriteLock();
     // Verify that the destination does not exist as a directory already.
     final INodesInPath iip = dir.getINodesInPath4Write(src);
@@ -2370,6 +2561,26 @@
       throw new FileAlreadyExistsException(src +
           " already exists as a directory");
     }
+
+    FileEncryptionInfo feInfo = null;
+    if (dir.isInAnEZ(iip)) {
+      // The path is now within an EZ, but we're missing encryption parameters
+      if (suite == null || edek == null) {
+        throw new RetryStartFileException();
+      }
+      // Path is within an EZ and we have provided encryption parameters.
+      // Make sure that the generated EDEK matches the settings of the EZ.
+      String ezKeyName = dir.getKeyName(iip);
+      if (!ezKeyName.equals(edek.getEncryptionKeyName())) {
+        throw new RetryStartFileException();
+      }
+      feInfo = new FileEncryptionInfo(suite,
+          edek.getEncryptedKeyVersion().getMaterial(),
+          edek.getEncryptedKeyIv(),
+          edek.getEncryptionKeyVersionName());
+      Preconditions.checkNotNull(feInfo);
+    }
+
     final INodeFile myFile = INodeFile.valueOf(inode, src, true);
     if (isPermissionEnabled) {
       if (overwrite && myFile != null) {
@@ -2422,6 +2633,12 @@
       leaseManager.addLease(newNode.getFileUnderConstructionFeature()
           .getClientName(), src);
 
+      // Set encryption attributes if necessary
+      if (feInfo != null) {
+        dir.setFileEncryptionInfo(src, feInfo);
+        newNode = dir.getInode(newNode.getId()).asFile();
+      }
+
       // record file record in log, record new generation stamp
       getEditLog().logOpenFile(src, newNode, logRetryEntry);
       if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -2434,7 +2651,7 @@
       throw ie;
     }
   }
-  
+
   /**
    * Append to an existing file for append.
    * <p>
@@ -2560,7 +2777,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot recover the lease of " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       final INodeFile inode = INodeFile.valueOf(dir.getINode(src), src);
       if (!inode.isUnderConstruction()) {
         return true;
@@ -2687,11 +2904,12 @@
     }
   }
 
-  private LocatedBlock appendFileInt(String src, String holder,
+  private LocatedBlock appendFileInt(final String srcArg, String holder,
       String clientMachine, boolean logRetryCache)
       throws AccessControlException, SafeModeException,
       FileAlreadyExistsException, FileNotFoundException,
       ParentNotDirectoryException, IOException {
+    String src = srcArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: src=" + src
           + ", holder=" + holder
@@ -2706,7 +2924,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot append to file" + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       lb = appendFileInternal(pc, src, holder, clientMachine, logRetryCache);
     } catch (StandbyException se) {
       skipSync = true;
@@ -2727,7 +2945,7 @@
             +" block size " + lb.getBlock().getNumBytes());
       }
     }
-    logAuditEvent(true, "append", src);
+    logAuditEvent(true, "append", srcArg);
     return lb;
   }
 
@@ -2772,7 +2990,7 @@
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       LocatedBlock[] onRetryBlock = new LocatedBlock[1];
       FileState fileState = analyzeFileState(
           src, fileId, clientName, previous, onRetryBlock);
@@ -2995,7 +3213,7 @@
       checkOperation(OperationCategory.READ);
       //check safe mode
       checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
 
       //check lease
       final INode inode;
@@ -3048,7 +3266,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot abandon block " + b + " for file" + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
 
       final INode inode;
       if (fileId == INodeId.GRANDFATHER_INODE_ID) {
@@ -3130,9 +3348,10 @@
    *         (e.g if not all blocks have reached minimum replication yet)
    * @throws IOException on error (eg lease mismatch, file not open, file deleted)
    */
-  boolean completeFile(String src, String holder,
+  boolean completeFile(final String srcArg, String holder,
                        ExtendedBlock last, long fileId)
     throws SafeModeException, UnresolvedLinkException, IOException {
+    String src = srcArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.completeFile: " +
           src + " for " + holder);
@@ -3146,7 +3365,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot complete file " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       success = completeFileInternal(src, holder,
         ExtendedBlock.getLocalBlock(last), fileId);
     } finally {
@@ -3154,7 +3373,7 @@
     }
     getEditLog().logSync();
     if (success) {
-      NameNode.stateChangeLog.info("DIR* completeFile: " + src
+      NameNode.stateChangeLog.info("DIR* completeFile: " + srcArg
           + " is closed by " + holder);
     }
     return success;
@@ -3322,8 +3541,11 @@
     return ret;
   }
 
-  private boolean renameToInt(String src, String dst, boolean logRetryCache) 
+  private boolean renameToInt(final String srcArg, final String dstArg,
+    boolean logRetryCache)
     throws IOException, UnresolvedLinkException {
+    String src = srcArg;
+    String dst = dstArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: " + src +
           " to " + dst);
@@ -3342,8 +3564,8 @@
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot rename " + src);
       waitForLoadingFSImage();
-      src = FSDirectory.resolvePath(src, srcComponents, dir);
-      dst = FSDirectory.resolvePath(dst, dstComponents, dir);
+      src = resolvePath(src, srcComponents);
+      dst = resolvePath(dst, dstComponents);
       checkOperation(OperationCategory.WRITE);
       status = renameToInternal(pc, src, dst, logRetryCache);
       if (status) {
@@ -3354,7 +3576,7 @@
     }
     getEditLog().logSync();
     if (status) {
-      logAuditEvent(true, "rename", src, dst, resultingStat);
+      logAuditEvent(true, "rename", srcArg, dstArg, resultingStat);
     }
     return status;
   }
@@ -3392,8 +3614,10 @@
   
 
   /** Rename src to dst */
-  void renameTo(String src, String dst, Options.Rename... options)
-      throws IOException, UnresolvedLinkException {
+  void renameTo(final String srcArg, final String dstArg,
+      Options.Rename... options) throws IOException, UnresolvedLinkException {
+    String src = srcArg;
+    String dst = dstArg;
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.renameTo: with options - "
           + src + " to " + dst);
@@ -3416,8 +3640,8 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot rename " + src);
-      src = FSDirectory.resolvePath(src, srcComponents, dir);
-      dst = FSDirectory.resolvePath(dst, dstComponents, dir);
+      src = resolvePath(src, srcComponents);
+      dst = resolvePath(dst, dstComponents);
       renameToInternal(pc, src, dst, cacheEntry != null, options);
       resultingStat = getAuditFileInfo(dst, false);
       success = true;
@@ -3431,7 +3655,7 @@
       for (Rename option : options) {
         cmd.append(option.value()).append(" ");
       }
-      logAuditEvent(true, cmd.toString(), src, dst, resultingStat);
+      logAuditEvent(true, cmd.toString(), srcArg, dstArg, resultingStat);
     }
   }
 
@@ -3529,7 +3753,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot delete " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       if (!recursive && dir.isNonEmptyDirectory(src)) {
         throw new PathIsNotEmptyDirectoryException(src + " is non empty");
       }
@@ -3537,6 +3761,7 @@
         checkPermission(pc, src, false, null, FsAction.WRITE, null,
             FsAction.ALL, true, false);
       }
+
       long mtime = now();
       // Unlink the target directory from directory tree
       long filesRemoved = dir.delete(src, collectedBlocks, removedINodes,
@@ -3672,7 +3897,7 @@
   /**
    * Get the file info for a specific file.
    *
-   * @param src The string representation of the path to the file
+   * @param srcArg The string representation of the path to the file
    * @param resolveLink whether to throw UnresolvedLinkException 
    *        if src refers to a symlink
    *
@@ -3683,9 +3908,10 @@
    *         or null if file not found
    * @throws StandbyException 
    */
-  HdfsFileStatus getFileInfo(String src, boolean resolveLink) 
+  HdfsFileStatus getFileInfo(final String srcArg, boolean resolveLink)
     throws AccessControlException, UnresolvedLinkException,
            StandbyException, IOException {
+    String src = srcArg;
     if (!DFSUtil.isValidName(src)) {
       throw new InvalidPathException("Invalid file name: " + src);
     }
@@ -3696,34 +3922,36 @@
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       if (isPermissionEnabled) {
         checkPermission(pc, src, false, null, null, null, null, false,
             resolveLink);
       }
-      stat = dir.getFileInfo(src, resolveLink);
+      stat = dir.getFileInfo(src, resolveLink,
+          FSDirectory.isReservedRawName(srcArg));
     } catch (AccessControlException e) {
-      logAuditEvent(false, "getfileinfo", src);
+      logAuditEvent(false, "getfileinfo", srcArg);
       throw e;
     } finally {
       readUnlock();
     }
-    logAuditEvent(true, "getfileinfo", src);
+    logAuditEvent(true, "getfileinfo", srcArg);
     return stat;
   }
   
   /**
    * Returns true if the file is closed
    */
-  boolean isFileClosed(String src) 
+  boolean isFileClosed(final String srcArg)
       throws AccessControlException, UnresolvedLinkException,
       StandbyException, IOException {
+    String src = srcArg;
     FSPermissionChecker pc = getPermissionChecker();  
     checkOperation(OperationCategory.READ);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         checkTraverse(pc, src);
@@ -3731,7 +3959,7 @@
       return !INodeFile.valueOf(dir.getINode(src), src).isUnderConstruction();
     } catch (AccessControlException e) {
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(false, "isFileClosed", src);
+        logAuditEvent(false, "isFileClosed", srcArg);
       }
       throw e;
     } finally {
@@ -3754,8 +3982,9 @@
     return ret;
   }
 
-  private boolean mkdirsInt(String src, PermissionStatus permissions,
+  private boolean mkdirsInt(final String srcArg, PermissionStatus permissions,
       boolean createParent) throws IOException, UnresolvedLinkException {
+    String src = srcArg;
     if(NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* NameSystem.mkdirs: " + src);
     }
@@ -3771,7 +4000,7 @@
     try {
       checkOperation(OperationCategory.WRITE);   
       checkNameNodeSafeMode("Cannot create directory " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       status = mkdirsInternal(pc, src, permissions, createParent);
       if (status) {
         resultingStat = getAuditFileInfo(src, false);
@@ -3781,7 +4010,7 @@
     }
     getEditLog().logSync();
     if (status) {
-      logAuditEvent(true, "mkdirs", src, null, resultingStat);
+      logAuditEvent(true, "mkdirs", srcArg, null, resultingStat);
     }
     return status;
   }
@@ -3939,7 +4168,8 @@
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  ContentSummary getContentSummary(String src) throws IOException {
+  ContentSummary getContentSummary(final String srcArg) throws IOException {
+    String src = srcArg;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
@@ -3947,7 +4177,7 @@
     boolean success = true;
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       if (isPermissionEnabled) {
         checkPermission(pc, src, false, null, null, null, FsAction.READ_EXECUTE);
       }
@@ -3958,7 +4188,7 @@
       throw ace;
     } finally {
       readUnlock();
-      logAuditEvent(success, "contentSummary", src);
+      logAuditEvent(success, "contentSummary", srcArg);
     }
   }
 
@@ -4009,7 +4239,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot fsync file " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       final INode inode;
       if (fileId == INodeId.GRANDFATHER_INODE_ID) {
         // Older clients may not have given us an inode ID to work with.
@@ -4475,9 +4705,10 @@
     }
   }
 
-  private DirectoryListing getListingInt(String src, byte[] startAfter,
-      boolean needLocation) 
+  private DirectoryListing getListingInt(final String srcArg, byte[] startAfter,
+      boolean needLocation)
     throws AccessControlException, UnresolvedLinkException, IOException {
+    String src = srcArg;
     DirectoryListing dl;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
@@ -4486,7 +4717,7 @@
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
 
       // Get file name when startAfter is an INodePath
       if (FSDirectory.isReservedName(startAfterString)) {
@@ -4510,7 +4741,7 @@
           checkTraverse(pc, src);
         }
       }
-      logAuditEvent(true, "listStatus", src);
+      logAuditEvent(true, "listStatus", srcArg);
       dl = dir.getListing(src, startAfter, needLocation);
     } finally {
       readUnlock();
@@ -5925,6 +6156,28 @@
     checkPermission(pc, path, false, null, null, null, null);
   }
 
+  /**
+   * This is a wrapper for FSDirectory.resolvePath(). If the path passed
+   * is prefixed with /.reserved/raw, then it checks to ensure that the caller
+   * has super user privs.
+   *
+   * @param path The path to resolve.
+   * @param pathComponents path components corresponding to the path
+   * @return if the path indicates an inode, return path after replacing up to
+   *         <inodeid> with the corresponding path of the inode, else the path
+   *         in {@code src} as is. If the path refers to a path in the "raw"
+   *         directory, return the non-raw pathname.
+   * @throws FileNotFoundException
+   * @throws AccessControlException
+   */
+  private String resolvePath(String path, byte[][] pathComponents)
+      throws FileNotFoundException, AccessControlException {
+    if (FSDirectory.isReservedRawName(path)) {
+      checkSuperuserPrivilege();
+    }
+    return FSDirectory.resolvePath(path, pathComponents, dir);
+  }
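
Concretely, the effect of this wrapper (illustrative paths only):

    // /.reserved/raw/ezone/a : checkSuperuserPrivilege() first, then resolved
    //                          to /ezone/a by FSDirectory.resolvePath()
    // /user/alice/a          : no superuser check, passed straight through to
    //                          FSDirectory.resolvePath()
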
+
   @Override
   public void checkSuperuserPrivilege()
       throws AccessControlException {
@@ -8146,7 +8399,9 @@
     return results;
   }
 
-  void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
+  void modifyAclEntries(final String srcArg, List<AclEntry> aclSpec)
+      throws IOException {
+    String src = srcArg;
     nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
@@ -8156,7 +8411,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot modify ACL entries on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       List<AclEntry> newAcl = dir.modifyAclEntries(src, aclSpec);
       getEditLog().logSetAcl(src, newAcl);
@@ -8165,10 +8420,12 @@
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "modifyAclEntries", src, null, resultingStat);
+    logAuditEvent(true, "modifyAclEntries", srcArg, null, resultingStat);
   }
 
-  void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
+  void removeAclEntries(final String srcArg, List<AclEntry> aclSpec)
+      throws IOException {
+    String src = srcArg;
     nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
@@ -8178,7 +8435,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot remove ACL entries on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       List<AclEntry> newAcl = dir.removeAclEntries(src, aclSpec);
       getEditLog().logSetAcl(src, newAcl);
@@ -8187,10 +8444,11 @@
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "removeAclEntries", src, null, resultingStat);
+    logAuditEvent(true, "removeAclEntries", srcArg, null, resultingStat);
   }
 
-  void removeDefaultAcl(String src) throws IOException {
+  void removeDefaultAcl(final String srcArg) throws IOException {
+    String src = srcArg;
     nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
@@ -8200,7 +8458,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       List<AclEntry> newAcl = dir.removeDefaultAcl(src);
       getEditLog().logSetAcl(src, newAcl);
@@ -8209,10 +8467,11 @@
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "removeDefaultAcl", src, null, resultingStat);
+    logAuditEvent(true, "removeDefaultAcl", srcArg, null, resultingStat);
   }
 
-  void removeAcl(String src) throws IOException {
+  void removeAcl(final String srcArg) throws IOException {
+    String src = srcArg;
     nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
@@ -8222,7 +8481,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot remove ACL on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       dir.removeAcl(src);
       getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
@@ -8231,10 +8490,11 @@
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "removeAcl", src, null, resultingStat);
+    logAuditEvent(true, "removeAcl", srcArg, null, resultingStat);
   }
 
-  void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
+  void setAcl(final String srcArg, List<AclEntry> aclSpec) throws IOException {
+    String src = srcArg;
     nnConf.checkAclsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
@@ -8244,7 +8504,7 @@
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set ACL on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOwner(pc, src);
       List<AclEntry> newAcl = dir.setAcl(src, aclSpec);
       getEditLog().logSetAcl(src, newAcl);
@@ -8253,7 +8513,7 @@
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "setAcl", src, null, resultingStat);
+    logAuditEvent(true, "setAcl", srcArg, null, resultingStat);
   }
 
   AclStatus getAclStatus(String src) throws IOException {
@@ -8264,7 +8524,7 @@
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       if (isPermissionEnabled) {
         checkPermission(pc, src, false, null, null, null, null);
       }
@@ -8273,7 +8533,140 @@
       readUnlock();
     }
   }
-  
+
+  /**
+   * Create an encryption zone on directory src using the specified key.
+   *
+   * @param src     the path of a directory which will be the root of the
+   *                encryption zone. The directory must be empty.
+   * @param keyName name of a key which must be present in the configured
+   *                KeyProvider.
+   * @throws AccessControlException  if the caller is not the superuser.
+   * @throws UnresolvedLinkException if the path can't be resolved.
+   * @throws SafeModeException       if the Namenode is in safe mode.
+   */
+  void createEncryptionZone(final String src, final String keyName)
+    throws IOException, UnresolvedLinkException,
+      SafeModeException, AccessControlException {
+    final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return; // Return previous response
+    }
+
+    boolean success = false;
+    try {
+      if (provider == null) {
+        throw new IOException(
+            "Can't create an encryption zone for " + src +
+            " since no key provider is available.");
+      }
+      if (keyName == null || keyName.isEmpty()) {
+        throw new IOException("Must specify a key name when creating an " +
+            "encryption zone");
+      }
+      KeyVersion keyVersion = provider.getCurrentKey(keyName);
+      if (keyVersion == null) {
+        /*
+         * It would be nice if we threw something more specific than
+         * IOException when the key is not found, but the KeyProvider API
+         * doesn't provide for that. If that API is ever changed to throw
+         * something more specific (e.g. UnknownKeyException) then we can
+         * update this to match it, or better yet, just rethrow the
+         * KeyProvider's exception.
+         */
+        throw new IOException("Key " + keyName + " doesn't exist.");
+      }
+      createEncryptionZoneInt(src, keyName, cacheEntry != null);
+      success = true;
+    } catch (AccessControlException e) {
+      logAuditEvent(false, "createEncryptionZone", src);
+      throw e;
+    } finally {
+      RetryCache.setState(cacheEntry, success);
+    }
+  }
+
+  private void createEncryptionZoneInt(final String srcArg, String keyName,
+      final boolean logRetryCache) throws IOException {
+    String src = srcArg;
+    HdfsFileStatus resultingStat = null;
+    checkSuperuserPrivilege();
+    checkOperation(OperationCategory.WRITE);
+    final byte[][] pathComponents =
+      FSDirectory.getPathComponentsForReservedPath(src);
+    writeLock();
+    try {
+      checkSuperuserPrivilege();
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot create encryption zone on " + src);
+      src = resolvePath(src, pathComponents);
+
+      final XAttr ezXAttr = dir.createEncryptionZone(src, keyName);
+      List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+      xAttrs.add(ezXAttr);
+      getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+      resultingStat = getAuditFileInfo(src, false);
+    } finally {
+      writeUnlock();
+    }
+    getEditLog().logSync();
+    logAuditEvent(true, "createEncryptionZone", srcArg, null, resultingStat);
+  }
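
A hypothetical client-side sketch of exercising this RPC; HdfsAdmin#createEncryptionZone and the key name "myEZKey" are assumptions and are not part of this diff (the key must already exist in the configured KeyProvider, and /ezone must be an empty directory, per the checks above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    // Hypothetical usage; only the superuser may call this successfully.
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    admin.createEncryptionZone(new Path("/ezone"), "myEZKey");
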
+
+  /**
+   * Get the encryption zone for the specified path.
+   *
+   * @param srcArg the path of a file or directory to get the EZ for.
+   * @return the EZ of the path, or null if none.
+   * @throws AccessControlException  if the caller is not the superuser.
+   * @throws UnresolvedLinkException if the path can't be resolved.
+   */
+  EncryptionZoneWithId getEZForPath(final String srcArg)
+    throws AccessControlException, UnresolvedLinkException, IOException {
+    String src = srcArg;
+    HdfsFileStatus resultingStat = null;
+    final byte[][] pathComponents =
+        FSDirectory.getPathComponentsForReservedPath(src);
+    boolean success = false;
+    final FSPermissionChecker pc = getPermissionChecker();
+    checkOperation(OperationCategory.READ);
+    readLock();
+    try {
+      if (isPermissionEnabled) {
+        checkPathAccess(pc, src, FsAction.READ);
+      }
+      checkOperation(OperationCategory.READ);
+      src = resolvePath(src, pathComponents);
+      final INodesInPath iip = dir.getINodesInPath(src, true);
+      final EncryptionZoneWithId ret = dir.getEZForPath(iip);
+      resultingStat = getAuditFileInfo(src, false);
+      success = true;
+      return ret;
+    } finally {
+      readUnlock();
+      logAuditEvent(success, "getEZForPath", srcArg, null, resultingStat);
+    }
+  }
+
+  BatchedListEntries<EncryptionZoneWithId> listEncryptionZones(long prevId)
+      throws IOException {
+    boolean success = false;
+    checkSuperuserPrivilege();
+    checkOperation(OperationCategory.READ);
+    readLock();
+    try {
+      checkSuperuserPrivilege();
+      checkOperation(OperationCategory.READ);
+      final BatchedListEntries<EncryptionZoneWithId> ret =
+          dir.listEncryptionZones(prevId);
+      success = true;
+      return ret;
+    } finally {
+      readUnlock();
+      logAuditEvent(success, "listEncryptionZones", null);
+    }
+  }
+
   /**
    * Set xattr for a file or directory.
    * 
@@ -8307,20 +8700,22 @@
     }
   }
   
-  private void setXAttrInt(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
-      boolean logRetryCache) throws IOException {
+  private void setXAttrInt(final String srcArg, XAttr xAttr,
+      EnumSet<XAttrSetFlag> flag, boolean logRetryCache) throws IOException {
+    String src = srcArg;
     nnConf.checkXAttrsConfigFlag();
     checkXAttrSize(xAttr);
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
-    XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+    XAttrPermissionFilter.checkPermissionForApi(pc, xAttr,
+        FSDirectory.isReservedRawName(src));
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set XAttr on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkXAttrChangeAccess(src, xAttr, pc);
       List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
       xAttrs.add(xAttr);
@@ -8331,7 +8726,7 @@
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "setXAttr", src, null, resultingStat);
+    logAuditEvent(true, "setXAttr", srcArg, null, resultingStat);
   }
 
   /**
@@ -8354,15 +8749,18 @@
     }
   }
   
-  List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) throws IOException {
+  List<XAttr> getXAttrs(final String srcArg, List<XAttr> xAttrs)
+      throws IOException {
+    String src = srcArg;
     nnConf.checkXAttrsConfigFlag();
     FSPermissionChecker pc = getPermissionChecker();
+    final boolean isRawPath = FSDirectory.isReservedRawName(src);
     boolean getAll = xAttrs == null || xAttrs.isEmpty();
     if (!getAll) {
       try {
-        XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs);
+        XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs, isRawPath);
       } catch (AccessControlException e) {
-        logAuditEvent(false, "getXAttrs", src);
+        logAuditEvent(false, "getXAttrs", srcArg);
         throw e;
       }
     }
@@ -8370,14 +8768,14 @@
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         checkPathAccess(pc, src, FsAction.READ);
       }
       List<XAttr> all = dir.getXAttrs(src);
       List<XAttr> filteredAll = XAttrPermissionFilter.
-          filterXAttrsForApi(pc, all);
+          filterXAttrsForApi(pc, all, isRawPath);
       if (getAll) {
         return filteredAll;
       } else {
@@ -8403,7 +8801,7 @@
         return toGet;
       }
     } catch (AccessControlException e) {
-      logAuditEvent(false, "getXAttrs", src);
+      logAuditEvent(false, "getXAttrs", srcArg);
       throw e;
     } finally {
       readUnlock();
@@ -8413,11 +8811,12 @@
   List<XAttr> listXAttrs(String src) throws IOException {
     nnConf.checkXAttrsConfigFlag();
     final FSPermissionChecker pc = getPermissionChecker();
+    final boolean isRawPath = FSDirectory.isReservedRawName(src);
     checkOperation(OperationCategory.READ);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         /* To access xattr names, you need EXECUTE in the owning directory. */
@@ -8425,7 +8824,7 @@
       }
       final List<XAttr> all = dir.getXAttrs(src);
       final List<XAttr> filteredAll = XAttrPermissionFilter.
-        filterXAttrsForApi(pc, all);
+        filterXAttrsForApi(pc, all, isRawPath);
       return filteredAll;
     } catch (AccessControlException e) {
       logAuditEvent(false, "listXAttrs", src);
@@ -8464,19 +8863,21 @@
     }
   }
 
-  void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache)
+  void removeXAttrInt(final String srcArg, XAttr xAttr, boolean logRetryCache)
       throws IOException {
+    String src = srcArg;
     nnConf.checkXAttrsConfigFlag();
     HdfsFileStatus resultingStat = null;
     FSPermissionChecker pc = getPermissionChecker();
-      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+    XAttrPermissionFilter.checkPermissionForApi(pc, xAttr,
+        FSDirectory.isReservedRawName(src));
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot remove XAttr entry on " + src);
-      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      src = resolvePath(src, pathComponents);
       checkXAttrChangeAccess(src, xAttr, pc);
 
       List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
@@ -8493,7 +8894,7 @@
       writeUnlock();
     }
     getEditLog().logSync();
-    logAuditEvent(true, "removeXAttr", src, null, resultingStat);
+    logAuditEvent(true, "removeXAttr", srcArg, null, resultingStat);
   }
 
   private void checkXAttrChangeAccess(String src, XAttr xAttr,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 199d728..e17d403 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -37,6 +37,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -77,6 +78,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -535,7 +537,8 @@
   @Override // ClientProtocol
   public HdfsFileStatus create(String src, FsPermission masked,
       String clientName, EnumSetWritable<CreateFlag> flag,
-      boolean createParent, short replication, long blockSize)
+      boolean createParent, short replication, long blockSize, 
+      List<CipherSuite> cipherSuites)
       throws IOException {
     String clientMachine = getClientMachine();
     if (stateChangeLog.isDebugEnabled()) {
@@ -549,7 +552,7 @@
     HdfsFileStatus fileStatus = namesystem.startFile(src, new PermissionStatus(
         getRemoteUser().getShortUserName(), null, masked),
         clientName, clientMachine, flag.get(), createParent, replication,
-        blockSize);
+        blockSize, cipherSuites);
     metrics.incrFilesCreated();
     metrics.incrCreateFileOps();
     return fileStatus;
@@ -1424,6 +1427,24 @@
   }
   
   @Override
+  public void createEncryptionZone(String src, String keyName)
+    throws IOException {
+    namesystem.createEncryptionZone(src, keyName);
+  }
+
+  @Override
+  public EncryptionZoneWithId getEZForPath(String src)
+    throws IOException {
+    return namesystem.getEZForPath(src);
+  }
+
+  @Override
+  public BatchedEntries<EncryptionZoneWithId> listEncryptionZones(
+      long prevId) throws IOException {
+    return namesystem.listEncryptionZones(prevId);
+  }
+
+  @Override
   public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
       throws IOException {
     namesystem.setXAttr(src, xAttr, flag);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RetryStartFileException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RetryStartFileException.java
new file mode 100644
index 0000000..a5758a7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RetryStartFileException.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+/**
+ * Thrown when a client should retry the create/startFile operation on the
+ * NameNode.
+ */
+public class RetryStartFileException extends Exception {
+  private static final long serialVersionUID = 1L;
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
index 9873014..237f9d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
@@ -47,15 +47,27 @@
  * <br>
  * SYSTEM - extended system attributes: these are used by the HDFS
  * core and are not available through admin/user API.
+ * <br>
+ * RAW - extended attributes used for internal system attributes that
+ *   sometimes need to be exposed. Like SYSTEM namespace attributes, they are
+ *   not visible to the user except when getXAttr/getXAttrs is called on a
+ *   file or directory in the /.reserved/raw HDFS directory hierarchy. These
+ *   attributes can only be accessed by the superuser.
  */
 @InterfaceAudience.Private
 public class XAttrPermissionFilter {
   
-  static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr) 
+  static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr,
+      boolean isRawPath)
       throws AccessControlException {
+    final boolean isSuperUser = pc.isSuperUser();
     if (xAttr.getNameSpace() == XAttr.NameSpace.USER || 
-        (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && 
-        pc.isSuperUser())) {
+        (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && isSuperUser)) {
+      return;
+    }
+    if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
+        isRawPath && isSuperUser) {
       return;
     }
     throw new AccessControlException("User doesn't have permission for xattr: "
@@ -63,30 +75,34 @@
   }
 
   static void checkPermissionForApi(FSPermissionChecker pc,
-                                    List<XAttr> xAttrs) throws AccessControlException {
+      List<XAttr> xAttrs, boolean isRawPath) throws AccessControlException {
     Preconditions.checkArgument(xAttrs != null);
     if (xAttrs.isEmpty()) {
       return;
     }
 
     for (XAttr xAttr : xAttrs) {
-      checkPermissionForApi(pc, xAttr);
+      checkPermissionForApi(pc, xAttr, isRawPath);
     }
   }
 
   static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
-      List<XAttr> xAttrs) {
+      List<XAttr> xAttrs, boolean isRawPath) {
     assert xAttrs != null : "xAttrs can not be null";
     if (xAttrs == null || xAttrs.isEmpty()) {
       return xAttrs;
     }
     
     List<XAttr> filteredXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
+    final boolean isSuperUser = pc.isSuperUser();
     for (XAttr xAttr : xAttrs) {
       if (xAttr.getNameSpace() == XAttr.NameSpace.USER) {
         filteredXAttrs.add(xAttr);
       } else if (xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED && 
-          pc.isSuperUser()) {
+          isSuperUser) {
+        filteredXAttrs.add(xAttr);
+      } else if (xAttr.getNameSpace() == XAttr.NameSpace.RAW &&
+          isSuperUser && isRawPath) {
         filteredXAttrs.add(xAttr);
       }
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
new file mode 100644
index 0000000..bb52ddd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CryptoAdmin.java
@@ -0,0 +1,301 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.tools.TableListing;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Tool;
+
+/**
+ * This class implements crypto command-line operations.
+ */
+@InterfaceAudience.Private
+public class CryptoAdmin extends Configured implements Tool {
+
+  /**
+   * Maximum length for printed lines
+   */
+  private static final int MAX_LINE_WIDTH = 80;
+
+  public CryptoAdmin() {
+    this(null);
+  }
+
+  public CryptoAdmin(Configuration conf) {
+    super(conf);
+  }
+
+  @Override
+  public int run(String[] args) throws IOException {
+    if (args.length == 0) {
+      printUsage(false);
+      return 1;
+    }
+    final Command command = determineCommand(args[0]);
+    if (command == null) {
+      System.err.println("Can't understand command '" + args[0] + "'");
+      if (!args[0].startsWith("-")) {
+        System.err.println("Command names must start with dashes.");
+      }
+      printUsage(false);
+      return 1;
+    }
+    final List<String> argsList = new LinkedList<String>();
+    for (int j = 1; j < args.length; j++) {
+      argsList.add(args[j]);
+    }
+    try {
+      return command.run(getConf(), argsList);
+    } catch (IllegalArgumentException e) {
+      System.err.println(prettifyException(e));
+      return -1;
+    }
+  }
+
+  public static void main(String[] argsArray) throws IOException {
+    final CryptoAdmin cryptoAdmin = new CryptoAdmin(new Configuration());
+    System.exit(cryptoAdmin.run(argsArray));
+  }
+
+  private static DistributedFileSystem getDFS(Configuration conf)
+      throws IOException {
+    final FileSystem fs = FileSystem.get(conf);
+    if (!(fs instanceof DistributedFileSystem)) {
+      throw new IllegalArgumentException("FileSystem " + fs.getUri() +
+          " is not an HDFS file system");
+    }
+    return (DistributedFileSystem) fs;
+  }
+
+  /**
+   * NN exceptions contain the stack trace as part of the exception message.
+   * When it's a known error, pretty-print the error and squish the stack trace.
+   */
+  private static String prettifyException(Exception e) {
+    return e.getClass().getSimpleName() + ": " +
+      e.getLocalizedMessage().split("\n")[0];
+  }
+
+  private static TableListing getOptionDescriptionListing() {
+    final TableListing listing = new TableListing.Builder()
+      .addField("").addField("", true)
+      .wrapWidth(MAX_LINE_WIDTH).hideHeaders().build();
+    return listing;
+  }
+
+  interface Command {
+    String getName();
+    String getShortUsage();
+    String getLongUsage();
+    int run(Configuration conf, List<String> args) throws IOException;
+  }
+
+  private static class CreateZoneCommand implements Command {
+    @Override
+    public String getName() {
+      return "-createZone";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName() + " -keyName <keyName> -path <path>]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      final TableListing listing = getOptionDescriptionListing();
+      listing.addRow("<path>", "The path of the encryption zone to create. " +
+        "It must be an empty directory.");
+      listing.addRow("<keyName>", "Name of the key to use for the " +
+          "encryption zone.");
+      return getShortUsage() + "\n" +
+        "Create a new encryption zone.\n\n" +
+        listing.toString();
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      final String path = StringUtils.popOptionWithArgument("-path", args);
+      if (path == null) {
+        System.err.println("You must specify a path with -path.");
+        return 1;
+      }
+
+      final String keyName =
+          StringUtils.popOptionWithArgument("-keyName", args);
+      if (keyName == null) {
+        System.err.println("You must specify a key name with -keyName.");
+        return 1;
+      }
+
+      if (!args.isEmpty()) {
+        System.err.println("Can't understand argument: " + args.get(0));
+        return 1;
+      }
+
+      final DistributedFileSystem dfs = getDFS(conf);
+      try {
+        dfs.createEncryptionZone(new Path(path), keyName);
+        System.out.println("Added encryption zone " + path);
+      } catch (IOException e) {
+        System.err.println(prettifyException(e));
+        return 2;
+      }
+
+      return 0;
+    }
+  }
+
+  private static class ListZonesCommand implements Command {
+    @Override
+    public String getName() {
+      return "-listZones";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName()+ "]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      return getShortUsage() + "\n" +
+        "List all encryption zones. Requires superuser permissions.\n\n";
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      if (!args.isEmpty()) {
+        System.err.println("Can't understand argument: " + args.get(0));
+        return 1;
+      }
+
+      final DistributedFileSystem dfs = getDFS(conf);
+      try {
+        final TableListing listing = new TableListing.Builder()
+          .addField("").addField("", true)
+          .wrapWidth(MAX_LINE_WIDTH).hideHeaders().build();
+        final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
+        while (it.hasNext()) {
+          EncryptionZone ez = it.next();
+          listing.addRow(ez.getPath(), ez.getKeyName());
+        }
+        System.out.println(listing.toString());
+      } catch (IOException e) {
+        System.err.println(prettifyException(e));
+        return 2;
+      }
+
+      return 0;
+    }
+  }
+
+  private static class HelpCommand implements Command {
+    @Override
+    public String getName() {
+      return "-help";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[-help <command-name>]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      final TableListing listing = getOptionDescriptionListing();
+      listing.addRow("<command-name>", "The command for which to get " +
+          "detailed help. If no command is specified, print detailed help for " +
+          "all commands");
+      return getShortUsage() + "\n" +
+        "Get detailed help about a command.\n\n" +
+        listing.toString();
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      if (args.size() == 0) {
+        for (Command command : COMMANDS) {
+          System.err.println(command.getLongUsage());
+        }
+        return 0;
+      }
+      if (args.size() != 1) {
+        System.out.println("You must give exactly one argument to -help.");
+        return 0;
+      }
+      final String commandName = args.get(0);
+      // prepend a dash to match against the command names
+      final Command command = determineCommand("-" + commandName);
+      if (command == null) {
+        System.err.print("Sorry, I don't know the command '" +
+          commandName + "'.\n");
+        System.err.print("Valid help command names are:\n");
+        String separator = "";
+        for (Command c : COMMANDS) {
+          System.err.print(separator + c.getName().substring(1));
+          separator = ", ";
+        }
+        System.err.print("\n");
+        return 1;
+      }
+      System.err.print(command.getLongUsage());
+      return 0;
+    }
+  }
+
+  private static final Command[] COMMANDS = {
+    new CreateZoneCommand(),
+    new ListZonesCommand(),
+    new HelpCommand(),
+  };
+
+  private static void printUsage(boolean longUsage) {
+    System.err.println(
+        "Usage: bin/hdfs crypto [COMMAND]");
+    for (Command command : COMMANDS) {
+      if (longUsage) {
+        System.err.print(command.getLongUsage());
+      } else {
+        System.err.print("          " + command.getShortUsage());
+      }
+    }
+    System.err.println();
+  }
+
+  private static Command determineCommand(String commandName) {
+    for (int i = 0; i < COMMANDS.length; i++) {
+      if (COMMANDS[i].getName().equals(commandName)) {
+        return COMMANDS[i];
+      }
+    }
+    return null;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 3d5fee5..321630c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -252,7 +252,8 @@
             : childrenNumLong.intValue();
     return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group,
-        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum);
+        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum,
+        null);
   }
 
   /** Convert an ExtendedBlock to a Json map. */
@@ -532,7 +533,7 @@
         (Map<?, ?>)m.get("lastLocatedBlock"));
     final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
     return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
-        lastLocatedBlock, isLastBlockComplete);
+        lastLocatedBlock, isLastBlockComplete, null);
   }
 
   /** Convert a ContentSummary to a Json string. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index cd291a6..edffc9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -32,6 +32,7 @@
 import "hdfs.proto";
 import "acl.proto";
 import "xattr.proto";
+import "encryption.proto";
 
 /**
  * The ClientNamenodeProtocol Service defines the interface between a client 
@@ -73,6 +74,7 @@
   required bool createParent = 5;
   required uint32 replication = 6; // Short: Only 16 bits used
   required uint64 blockSize = 7;
+  repeated CipherSuite cipherSuites = 8;
 }
 
 message CreateResponseProto {
@@ -793,4 +795,10 @@
       returns(RemoveXAttrResponseProto);
   rpc checkAccess(CheckAccessRequestProto)
       returns(CheckAccessResponseProto);
+  rpc createEncryptionZone(CreateEncryptionZoneRequestProto)
+      returns(CreateEncryptionZoneResponseProto);
+  rpc listEncryptionZones(ListEncryptionZonesRequestProto)
+      returns(ListEncryptionZonesResponseProto);
+  rpc getEZForPath(GetEZForPathRequestProto)
+      returns(GetEZForPathResponseProto);
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
new file mode 100644
index 0000000..ecf0970
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/encryption.proto
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+// This file contains protocol buffers that are used throughout HDFS -- i.e.
+// by the client, server, and data transfer protocols.
+
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "EncryptionZonesProtos";
+option java_generate_equals_and_hash = true;
+package hadoop.hdfs;
+
+import "hdfs.proto";
+
+message CreateEncryptionZoneRequestProto {
+  required string src = 1;
+  optional string keyName = 2;
+}
+
+message CreateEncryptionZoneResponseProto {
+}
+
+message ListEncryptionZonesRequestProto {
+  required int64 id = 1;
+}
+
+message EncryptionZoneWithIdProto {
+  required string path = 1;
+  required string keyName = 2;
+  required int64 id = 3;
+}
+
+message ListEncryptionZonesResponseProto {
+  repeated EncryptionZoneWithIdProto zones = 1;
+  required bool hasMore = 2;
+}
+
+message GetEZForPathRequestProto {
+  required string src = 1;
+}
+
+message GetEZForPathResponseProto {
+  required EncryptionZoneWithIdProto zone = 1;
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 32c54b0..a410224 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -200,6 +200,23 @@
   optional string encryptionAlgorithm = 6;
 }
 
+/**
+ * Cipher suite.
+ */
+enum CipherSuite {
+    UNKNOWN = 1;
+    AES_CTR_NOPADDING = 2;
+}
+
+/**
+ * Encryption information for a file.
+ */
+message FileEncryptionInfoProto {
+  required CipherSuite suite = 1;
+  required bytes key = 2;
+  required bytes iv = 3;
+  required string ezKeyVersionName = 4;
+}
 
 /**
  * A set of file blocks and their locations.
@@ -210,9 +227,9 @@
   required bool underConstruction = 3;
   optional LocatedBlockProto lastBlock = 4;
   required bool isLastBlockComplete = 5;
+  optional FileEncryptionInfoProto fileEncryptionInfo = 6;
 }
 
-
 /**
  * Status of a file, directory or symlink
  * Optionally includes a file's block locations if requested by client on the rpc call.
@@ -243,6 +260,9 @@
   // Optional field for fileId
   optional uint64 fileId = 13 [default = 0]; // default as an invalid id
   optional int32 childrenNum = 14 [default = -1];
+
+  // Optional field for file encryption
+  optional FileEncryptionInfoProto fileEncryptionInfo = 15;
 } 
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
index cb86ff2..acdc28e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
@@ -27,6 +27,7 @@
     TRUSTED   = 1;
     SECURITY  = 2;
     SYSTEM    = 3;
+    RAW       = 4;
   }
   
   required XAttrNamespaceProto namespace = 1;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index c1eb49f..0b0657b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2060,4 +2060,13 @@
     block layout (see HDFS-6482 for details on the layout).</description>
 </property>
 
+<property>
+  <name>dfs.namenode.list.encryption.zones.num.responses</name>
+  <value>100</value>
+  <description>When listing encryption zones, the maximum number of zones
+    that will be returned in a batch. Fetching the list incrementally in
+    batches improves namenode performance.
+  </description>
+</property>
+
 </configuration>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
index 56aec0c..0a99fe5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ExtendedAttributes.apt.vm
@@ -30,7 +30,7 @@
 
 ** {Namespaces and Permissions}
 
-  In HDFS, as in Linux, there are four valid namespaces: <<<user>>>, <<<trusted>>>, <<<system>>>, and <<<security>>>. Each of these namespaces have different access restrictions.
+  In HDFS, there are five valid namespaces: <<<user>>>, <<<trusted>>>, <<<system>>>, <<<security>>>, and <<<raw>>>. Each of these namespaces has different access restrictions.
 
   The <<<user>>> namespace is the namespace that will commonly be used by client applications. Access to extended attributes in the user namespace is controlled by the corresponding file permissions.
 
@@ -40,6 +40,8 @@
 
   The <<<security>>> namespace is reserved for internal HDFS use. This namespace is not accessible through userspace methods. It is currently unused.
 
+  The <<<raw>>> namespace is reserved for internal system attributes that sometimes need to be exposed. Like <<<system>>> namespace attributes, they are not visible to the user except when <<<getXAttr>>>/<<<getXAttrs>>> is called on a file or directory in the <<</.reserved/raw>>> HDFS directory hierarchy. These attributes can only be accessed by the superuser. An example of where <<<raw>>> namespace extended attributes are used is the <<<distcp>>> utility. Encryption zone metadata is stored in <<<raw.*>>> extended attributes, so as long as the administrator uses <<</.reserved/raw>>> pathnames in both the source and the target, the encrypted files in the encryption zones are transparently copied.
+
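+  For illustration, here is a minimal sketch of how a superuser could set and
+  read a <<<raw>>> attribute through the <<</.reserved/raw>>> prefix using the
+  Java <<<FileSystem>>> API. The example is hypothetical: the path and the
+  attribute name <<<raw.example>>> are illustrative only.
+
++---+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public class RawXAttrExample {
+  public static void main(String[] args) throws Exception {
+    Configuration conf = new Configuration();
+    // Must be run as the HDFS superuser; raw xattrs are superuser-only.
+    FileSystem fs = FileSystem.get(conf);
+    Path rawPath = new Path("/.reserved/raw/data/file1");
+    fs.setXAttr(rawPath, "raw.example", new byte[]{0x01, 0x02});
+    byte[] value = fs.getXAttr(rawPath, "raw.example");
+    System.out.println("raw.example has " + value.length + " bytes");
+    fs.close();
+  }
+}
++---+
+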
 * {Interacting with extended attributes}
 
   The Hadoop shell has support for interacting with extended attributes via <<<hadoop fs -getfattr>>> and <<<hadoop fs -setfattr>>>. These commands are styled after the Linux {{{http://www.bestbits.at/acl/man/man1/getfattr.txt}getfattr(1)}} and {{{http://www.bestbits.at/acl/man/man1/setfattr.txt}setfattr(1)}} commands.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/TransparentEncryption.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/TransparentEncryption.apt.vm
new file mode 100644
index 0000000..3689a77
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/TransparentEncryption.apt.vm
@@ -0,0 +1,206 @@
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~   http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License. See accompanying LICENSE file.
+
+  ---
+  Hadoop Distributed File System-${project.version} - Transparent Encryption in HDFS
+  ---
+  ---
+  ${maven.build.timestamp}
+
+Transparent Encryption in HDFS
+
+%{toc|section=1|fromDepth=2|toDepth=3}
+
+* {Overview}
+
+  HDFS implements <transparent>, <end-to-end> encryption.
+  Once configured, data read from and written to HDFS is <transparently> encrypted and decrypted without requiring changes to user application code.
+  This encryption is also <end-to-end>, which means the data can only be encrypted and decrypted by the client.
+  HDFS never stores or has access to unencrypted data or data encryption keys.
+  This satisfies two typical requirements for encryption: <at-rest encryption> (meaning data on persistent media, such as a disk) as well as <in-transit encryption> (e.g. when data is travelling over the network).
+
+* {Use Cases}
+
+  Data encryption is required by a number of different government, financial, and regulatory entities.
+  For example, the health-care industry has HIPAA regulations, the card payment industry has PCI DSS regulations, and the US government has FISMA regulations.
+  Having transparent encryption built into HDFS makes it easier for organizations to comply with these regulations.
+
+  Encryption can also be performed at the application-level, but by integrating it into HDFS, existing applications can operate on encrypted data without changes.
+  This integrated architecture implies stronger encrypted file semantics and better coordination with other HDFS functions.
+
+* {Architecture}
+
+** {Key Management Server, KeyProvider, EDEKs}
+
+  A new cluster service is required to store, manage, and access encryption keys: the Hadoop <Key Management Server (KMS)>.
+  The KMS is a proxy that interfaces with a backing key store on behalf of HDFS daemons and clients.
+  Both the backing key store and the KMS implement the Hadoop KeyProvider client API.
+  See the {{{../../hadoop-kms/index.html}KMS documentation}} for more information.
+
+  In the KeyProvider API, each encryption key has a unique <key name>.
+  Because keys can be rolled, a key can have multiple <key versions>, where each key version has its own <key material> (the actual secret bytes used during encryption and decryption).
+  An encryption key can be fetched by either its key name, returning the latest version of the key, or by a specific key version.
+
+  The KMS implements additional functionality which enables creation and decryption of <encrypted encryption keys (EEKs)>.
+  Creation and decryption of EEKs happens entirely on the KMS.
+  Importantly, the client requesting creation or decryption of an EEK never handles the EEK's encryption key.
+  To create a new EEK, the KMS generates a new random key, encrypts it with the specified key, and returns the EEK to the client.
+  To decrypt an EEK, the KMS checks that the user has access to the encryption key, uses it to decrypt the EEK, and returns the decrypted encryption key.
+
+  In the context of HDFS encryption, EEKs are <encrypted data encryption keys (EDEKs)>, where a <data encryption key (DEK)> is what is used to encrypt and decrypt file data.
+  Typically, the key store is configured to only allow end users access to the keys used to encrypt DEKs.
+  This means that EDEKs can be safely stored and handled by HDFS, since the HDFS user will not have access to EDEK encryption keys.
+
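+  For illustration, a minimal sketch of creating an encryption zone key through
+  the KeyProvider API follows. It is hypothetical: it assumes a key provider
+  has been configured via <<<KeyProviderFactory.KEY_PROVIDER_PATH>>>, and the
+  key name <<<mykey>>> is illustrative only.
+
++---+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
+
+public class CreateZoneKeyExample {
+  public static void main(String[] args) throws Exception {
+    Configuration conf = new Configuration();
+    // Use the first configured key provider (for example, the KMS provider).
+    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
+    KeyProvider.Options options = KeyProvider.options(conf);
+    options.setBitLength(128);       // length of the new encryption zone key
+    provider.createKey("mykey", options);
+    provider.flush();                // persist the key to the backing store
+  }
+}
++---+
+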
+** {Encryption zones}
+
+  For transparent encryption, we introduce a new abstraction to HDFS: the <encryption zone>.
+  An encryption zone is a special directory whose contents will be transparently encrypted upon write and transparently decrypted upon read.
+  Each encryption zone is associated with a single <encryption zone key> which is specified when the zone is created.
+  Each file within an encryption zone has its own unique EDEK.
+
+  When creating a new file in an encryption zone, the NameNode asks the KMS to generate a new EDEK encrypted with the encryption zone's key.
+  The EDEK is then stored persistently as part of the file's metadata on the NameNode.
+
+  When reading a file within an encryption zone, the NameNode provides the client with the file's EDEK and the encryption zone key version used to encrypt the EDEK.
+  The client then asks the KMS to decrypt the EDEK, which involves checking that the client has permission to access the encryption zone key version.
+  Assuming that is successful, the client uses the DEK to decrypt the file's contents.
+
+  All of the above steps for the read and write path happen automatically through interactions between the DFSClient, the NameNode, and the KMS.
+
+  Access to encrypted file data and metadata is controlled by normal HDFS filesystem permissions.
+  This means that if HDFS is compromised (for example, by gaining unauthorized access to an HDFS superuser account), a malicious user only gains access to ciphertext and encrypted keys.
+  However, since access to encryption zone keys is controlled by a separate set of permissions on the KMS and key store, this does not pose a security threat.
+
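+  For illustration, a minimal sketch of creating an encryption zone and listing
+  all zones programmatically follows. It is hypothetical: <<</zone>>> and
+  <<<mykey>>> are illustrative, the directory must be empty, and the key must
+  already exist in the key provider.
+
++---+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+
+public class EncryptionZoneExample {
+  public static void main(String[] args) throws Exception {
+    Configuration conf = new Configuration();
+    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
+    // Create an empty directory and turn it into an encryption zone.
+    dfs.mkdirs(new Path("/zone"));
+    dfs.createEncryptionZone(new Path("/zone"), "mykey");
+    // Listing zones requires superuser permissions.
+    RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
+    while (it.hasNext()) {
+      EncryptionZone ez = it.next();
+      System.out.println(ez.getPath() + " " + ez.getKeyName());
+    }
+  }
+}
++---+
+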
+* {Configuration}
+
+  A necessary prerequisite is an instance of the KMS, as well as a backing key store for the KMS.
+  See the {{{../../hadoop-kms/index.html}KMS documentation}} for more information.
+
+** Selecting an encryption algorithm and codec
+
+*** hadoop.security.crypto.codec.classes.EXAMPLECIPHERSUITE
+
+  Prefix for a given crypto codec; contains a comma-separated list of implementation classes for that codec (e.g. EXAMPLECIPHERSUITE).
+  The first available implementation will be used; the others are fallbacks.
+
+*** hadoop.security.crypto.codec.classes.aes.ctr.nopadding
+
+  Default: <<<org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec,org.apache.hadoop.crypto.JceAesCtrCryptoCodec>>>
+
+  Comma-separated list of crypto codec implementations for AES/CTR/NoPadding.
+  The first available implementation will be used; the others are fallbacks.
+
+*** hadoop.security.crypto.cipher.suite
+
+  Default: <<<AES/CTR/NoPadding>>>
+
+  Cipher suite for crypto codec.
+
+*** hadoop.security.crypto.jce.provider
+
+  Default: None
+
+  The JCE provider name used in CryptoCodec.
+
+*** hadoop.security.crypto.buffer.size
+
+  Default: <<<8192>>>
+
+  The buffer size used by CryptoInputStream and CryptoOutputStream. 
+
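+  For illustration, the client-side crypto settings above can also be
+  overridden programmatically. This hypothetical sketch simply sets the
+  documented defaults and is illustrative only.
+
++---+
+import org.apache.hadoop.conf.Configuration;
+
+public class CryptoConfExample {
+  public static void main(String[] args) {
+    Configuration conf = new Configuration();
+    conf.set("hadoop.security.crypto.cipher.suite", "AES/CTR/NoPadding");
+    conf.setInt("hadoop.security.crypto.buffer.size", 8192);
+    System.out.println(conf.get("hadoop.security.crypto.cipher.suite"));
+  }
+}
++---+
+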
+** Namenode configuration
+
+*** dfs.namenode.list.encryption.zones.num.responses
+
+  Default: <<<100>>>
+
+  When listing encryption zones, the maximum number of zones that will be returned in a batch.
+  Fetching the list incrementally in batches improves namenode performance.
+
+* {<<<crypto>>> command-line interface}
+
+** {createZone}
+
+  Usage: <<<[-createZone -keyName <keyName> -path <path>]>>>
+
+  Create a new encryption zone.
+
+*--+--+
+<path> | The path of the encryption zone to create. It must be an empty directory.
+*--+--+
+<keyName> | Name of the key to use for the encryption zone.
+*--+--+
+
+** {listZones}
+
+  Usage: <<<[-listZones]>>>
+
+  List all encryption zones. Requires superuser permissions.
+
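+  For example, <<<hdfs crypto -createZone -keyName mykey -path /zone>>> creates
+  a zone from the shell. The same command can be driven programmatically
+  through ToolRunner, as in the hypothetical sketch below (<<<mykey>>> and
+  <<</zone>>> are illustrative and must refer to an existing key and an empty
+  directory).
+
++---+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.tools.CryptoAdmin;
+import org.apache.hadoop.util.ToolRunner;
+
+public class CryptoCliExample {
+  public static void main(String[] args) throws Exception {
+    Configuration conf = new Configuration();
+    int ret = ToolRunner.run(new CryptoAdmin(conf), new String[] {
+        "-createZone", "-keyName", "mykey", "-path", "/zone" });
+    System.exit(ret);
+  }
+}
++---+
+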
+* {Attack vectors}
+
+** {Hardware access exploits}
+
+  These exploits assume that the attacker has gained physical access to hard drives from cluster machines, i.e. datanodes and namenodes.
+
+  [[1]] Access to swap files of processes containing data encryption keys.
+
+        * By itself, this does not expose cleartext, as it also requires access to encrypted block files.
+
+        * This can be mitigated by disabling swap, using encrypted swap, or using mlock to prevent keys from being swapped out.
+
+  [[1]] Access to encrypted block files.
+
+        * By itself, this does not expose cleartext, as it also requires access to DEKs.
+
+** {Root access exploits}
+
+  These exploits assume that the attacker has gained root shell access to cluster machines, i.e. datanodes and namenodes.
+  Many of these exploits cannot be addressed in HDFS, since a malicious root user has access to the in-memory state of processes holding encryption keys and cleartext.
+  For these exploits, the only mitigation technique is carefully restricting and monitoring root shell access.
+
+  [[1]] Access to encrypted block files.
+
+        * By itself, this does not expose cleartext, as it also requires access to encryption keys.
+
+  [[1]] Dump memory of client processes to obtain DEKs, delegation tokens, cleartext.
+
+        * No mitigation.
+
+  [[1]] Recording network traffic to sniff encryption keys and encrypted data in transit.
+
+        * By itself, insufficient to read cleartext without the EDEK encryption key.
+
+  [[1]] Dump memory of datanode process to obtain encrypted block data.
+
+        * By itself, insufficient to read cleartext without the DEK.
+
+  [[1]] Dump memory of namenode process to obtain encrypted data encryption keys.
+
+        * By itself, insufficient to read cleartext without the EDEK's encryption key and encrypted block files.
+
+** {HDFS admin exploits}
+
+  These exploits assume that the attacker has compromised HDFS, but does not have root or <<<hdfs>>> user shell access.
+
+  [[1]] Access to encrypted block files.
+
+        * By itself, insufficient to read cleartext without the EDEK and EDEK encryption key.
+
+  [[1]] Access to encryption zone and encrypted file metadata (including encrypted data encryption keys), via -fetchImage.
+
+        * By itself, insufficient to read cleartext without EDEK encryption keys.
+
+** {Rogue user exploits}
+
+  A rogue user can collect keys to which they have access, and use them later to decrypt encrypted data.
+  This can be mitigated through periodic key rolling policies.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
new file mode 100644
index 0000000..1c83829
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
@@ -0,0 +1,169 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.cli;
+
+import java.io.File;
+import java.io.IOException;
+import java.security.NoSuchAlgorithmException;
+import java.util.UUID;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.cli.util.CLICommand;
+import org.apache.hadoop.cli.util.CLICommandCryptoAdmin;
+import org.apache.hadoop.cli.util.CLICommandTypes;
+import org.apache.hadoop.cli.util.CLITestCmd;
+import org.apache.hadoop.cli.util.CryptoAdminCmdExecutor;
+import org.apache.hadoop.cli.util.CommandExecutor;
+import org.apache.hadoop.cli.util.CommandExecutor.Result;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HDFSPolicyProvider;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.tools.CryptoAdmin;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.xml.sax.SAXException;
+
+public class TestCryptoAdminCLI extends CLITestHelperDFS {
+  protected MiniDFSCluster dfsCluster = null;
+  protected FileSystem fs = null;
+  protected String namenode = null;
+  private static File tmpDir;
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
+        HDFSPolicyProvider.class, PolicyProvider.class);
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+
+    tmpDir = new File(System.getProperty("test.build.data", "target"),
+        UUID.randomUUID().toString()).getAbsoluteFile();
+    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
+        JavaKeyStoreProvider.SCHEME_NAME + "://file" + tmpDir + "/test.jks");
+
+    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    dfsCluster.waitClusterUp();
+    createAKey("mykey", conf);
+    namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
+
+    username = System.getProperty("user.name");
+
+    fs = dfsCluster.getFileSystem();
+    assertTrue("Not an HDFS: " + fs.getUri(),
+        fs instanceof DistributedFileSystem);
+  }
+
+  @After
+  @Override
+  public void tearDown() throws Exception {
+    if (fs != null) {
+      fs.close();
+    }
+    if (dfsCluster != null) {
+      dfsCluster.shutdown();
+    }
+    Thread.sleep(2000);
+    super.tearDown();
+  }
+
+  /* Helper function to create a key in the Key Provider. */
+  private void createAKey(String keyName, Configuration conf)
+    throws NoSuchAlgorithmException, IOException {
+    final KeyProvider provider =
+        dfsCluster.getNameNode().getNamesystem().getProvider();
+    final KeyProvider.Options options = KeyProvider.options(conf);
+    provider.createKey(keyName, options);
+    provider.flush();
+  }
+
+  @Override
+  protected String getTestFile() {
+    return "testCryptoConf.xml";
+  }
+
+  @Override
+  protected String expandCommand(final String cmd) {
+    String expCmd = cmd;
+    expCmd = expCmd.replaceAll("NAMENODE", namenode);
+    expCmd = expCmd.replaceAll("#LF#",
+        System.getProperty("line.separator"));
+    expCmd = super.expandCommand(expCmd);
+    return expCmd;
+  }
+
+  @Override
+  protected TestConfigFileParser getConfigParser() {
+    return new TestConfigFileParserCryptoAdmin();
+  }
+
+  private class TestConfigFileParserCryptoAdmin extends
+      CLITestHelper.TestConfigFileParser {
+    @Override
+    public void endElement(String uri, String localName, String qName)
+        throws SAXException {
+      if (qName.equals("crypto-admin-command")) {
+        if (testCommands != null) {
+          testCommands.add(new CLITestCmdCryptoAdmin(charString,
+              new CLICommandCryptoAdmin()));
+        } else if (cleanupCommands != null) {
+          cleanupCommands.add(new CLITestCmdCryptoAdmin(charString,
+              new CLICommandCryptoAdmin()));
+        }
+      } else {
+        super.endElement(uri, localName, qName);
+      }
+    }
+  }
+
+  private class CLITestCmdCryptoAdmin extends CLITestCmd {
+    public CLITestCmdCryptoAdmin(String str, CLICommandTypes type) {
+      super(str, type);
+    }
+
+    @Override
+    public CommandExecutor getExecutor(String tag)
+        throws IllegalArgumentException {
+      if (getType() instanceof CLICommandCryptoAdmin) {
+        return new CryptoAdminCmdExecutor(tag, new CryptoAdmin(conf));
+      }
+      return super.getExecutor(tag);
+    }
+  }
+
+  @Override
+  protected Result execute(CLICommand cmd) throws Exception {
+    return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
+  }
+
+  @Test
+  @Override
+  public void testAll() {
+    super.testAll();
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCryptoAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCryptoAdmin.java
new file mode 100644
index 0000000..89d28a7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CLICommandCryptoAdmin.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.cli.util;
+
+public class CLICommandCryptoAdmin implements CLICommandTypes {
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CryptoAdminCmdExecutor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CryptoAdminCmdExecutor.java
new file mode 100644
index 0000000..f781bf8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/CryptoAdminCmdExecutor.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.cli.util;
+
+import org.apache.hadoop.hdfs.tools.CryptoAdmin;
+import org.apache.hadoop.util.ToolRunner;
+
+public class CryptoAdminCmdExecutor extends CommandExecutor {
+  protected String namenode = null;
+  protected CryptoAdmin admin = null;
+
+  public CryptoAdminCmdExecutor(String namenode, CryptoAdmin admin) {
+    this.namenode = namenode;
+    this.admin = admin;
+  }
+
+  @Override
+  protected void execute(final String cmd) throws Exception {
+    String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
+    ToolRunner.run(admin, args);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
index 032a8df..e47658d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestXAttr.java
@@ -29,7 +29,7 @@
  * Tests for <code>XAttr</code> objects.
  */
 public class TestXAttr {
-  private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4;
+  private static XAttr XATTR, XATTR1, XATTR2, XATTR3, XATTR4, XATTR5;
   
   @BeforeClass
   public static void setUp() throws Exception {
@@ -58,6 +58,11 @@
       .setName("name")
       .setValue(value)
       .build();
+    XATTR5 = new XAttr.Builder()
+      .setNameSpace(XAttr.NameSpace.RAW)
+      .setName("name")
+      .setValue(value)
+      .build();
   }
   
   @Test
@@ -65,14 +70,17 @@
     assertNotSame(XATTR1, XATTR2);
     assertNotSame(XATTR2, XATTR3);
     assertNotSame(XATTR3, XATTR4);
+    assertNotSame(XATTR4, XATTR5);
     assertEquals(XATTR, XATTR1);
     assertEquals(XATTR1, XATTR1);
     assertEquals(XATTR2, XATTR2);
     assertEquals(XATTR3, XATTR3);
     assertEquals(XATTR4, XATTR4);
+    assertEquals(XATTR5, XATTR5);
     assertFalse(XATTR1.equals(XATTR2));
     assertFalse(XATTR2.equals(XATTR3));
     assertFalse(XATTR3.equals(XATTR4));
+    assertFalse(XATTR4.equals(XATTR5));
   }
   
   @Test
@@ -81,5 +89,6 @@
     assertFalse(XATTR1.hashCode() == XATTR2.hashCode());
     assertFalse(XATTR2.hashCode() == XATTR3.hashCode());
     assertFalse(XATTR3.hashCode() == XATTR4.hashCode());
+    assertFalse(XATTR4.hashCode() == XATTR5.hashCode());
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 68aa857..a57dd2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -27,6 +27,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
@@ -78,6 +79,7 @@
 import java.io.*;
 import java.net.*;
 import java.nio.ByteBuffer;
+import java.security.NoSuchAlgorithmException;
 import java.security.PrivilegedExceptionAction;
 import java.util.*;
 import java.util.concurrent.TimeoutException;
@@ -86,6 +88,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /** Utilities for HDFS tests */
 public class DFSTestUtil {
@@ -1305,6 +1308,71 @@
   }
 
   /**
+   * Verify that two files have the same contents.
+   *
+   * @param fs The file system containing the two files.
+   * @param p1 The path of the first file.
+   * @param p2 The path of the second file.
+   * @param len The length of the two files.
+   * @throws IOException
+   */
+  public static void verifyFilesEqual(FileSystem fs, Path p1, Path p2, int len)
+      throws IOException {
+    final FSDataInputStream in1 = fs.open(p1);
+    final FSDataInputStream in2 = fs.open(p2);
+    try {
+      for (int i = 0; i < len; i++) {
+        assertEquals("Mismatch at byte " + i, in1.read(), in2.read());
+      }
+    } finally {
+      in1.close();
+      in2.close();
+    }
+  }
+
+  /**
+   * Verify that two files have different contents.
+   *
+   * @param fs The file system containing the two files.
+   * @param p1 The path of the first file.
+   * @param p2 The path of the second file.
+   * @param len The length of the two files.
+   * @throws IOException
+   */
+  public static void verifyFilesNotEqual(FileSystem fs, Path p1, Path p2,
+      int len)
+          throws IOException {
+    final FSDataInputStream in1 = fs.open(p1);
+    final FSDataInputStream in2 = fs.open(p2);
+    try {
+      for (int i = 0; i < len; i++) {
+        if (in1.read() != in2.read()) {
+          return;
+        }
+      }
+      fail("files are equal, but should not be");
+    } finally {
+      in1.close();
+      in2.close();
+    }
+  }
+
+  /**
+   * Helper function to create a key in the Key Provider.
+   *
+   * @param keyName The name of the key to create
+   * @param cluster The cluster to create it in
+   * @param conf Configuration to use
+   */
+  public static void createKey(String keyName, MiniDFSCluster cluster,
+                                Configuration conf)
+          throws NoSuchAlgorithmException, IOException {
+    KeyProvider provider = cluster.getNameNode().getNamesystem().getProvider();
+    final KeyProvider.Options options = KeyProvider.options(conf);
+    options.setDescription(keyName);
+    options.setBitLength(128);
+    provider.createKey(keyName, options);
+    provider.flush();
+  }
+
+  /**
    * @return the node which is expected to run the recovery of the
    * given block, which is known to be under construction inside the
    * given NameNOde.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index c11cdc3..74daccc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -24,6 +24,7 @@
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyList;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Matchers.anyShort;
@@ -51,6 +52,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -253,16 +255,16 @@
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0)).when(mockNN).getFileInfo(anyString());
+                1010, 0, null)).when(mockNN).getFileInfo(anyString());
     
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0))
+                1010, 0, null))
         .when(mockNN)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
-            anyShort(), anyLong());
+            anyShort(), anyLong(), (List<CipherSuite>) anyList());
 
     final DFSClient client = new DFSClient(null, mockNN, conf, null);
     OutputStream os = client.create("testfile", true);
@@ -494,7 +496,8 @@
       List<LocatedBlock> badBlocks = new ArrayList<LocatedBlock>();
       badBlocks.add(badLocatedBlock);
       return new LocatedBlocks(goodBlockList.getFileLength(), false,
-                               badBlocks, null, true);
+                               badBlocks, null, true,
+                               null);
     }
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index aac16f4..2daf69d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -77,6 +77,13 @@
 
   static final String TEST_ROOT_DIR = PathUtils.getTestDirName(TestDFSShell.class);
 
+  private static final String RAW_A1 = "raw.a1";
+  private static final String TRUSTED_A1 = "trusted.a1";
+  private static final String USER_A1 = "user.a1";
+  private static final byte[] RAW_A1_VALUE = new byte[]{0x32, 0x32, 0x32};
+  private static final byte[] TRUSTED_A1_VALUE = new byte[]{0x31, 0x31, 0x31};
+  private static final byte[] USER_A1_VALUE = new byte[]{0x31, 0x32, 0x33};
+
   static Path writeFile(FileSystem fs, Path f) throws IOException {
     DataOutputStream out = fs.create(f);
     out.writeBytes("dhruba: " + f);
@@ -1664,8 +1671,8 @@
       final String group = status.getGroup();
       final FsPermission perm = status.getPermission();
       
-      fs.setXAttr(src, "user.a1", new byte[]{0x31, 0x32, 0x33});
-      fs.setXAttr(src, "trusted.a1", new byte[]{0x31, 0x31, 0x31});
+      fs.setXAttr(src, USER_A1, USER_A1_VALUE);
+      fs.setXAttr(src, TRUSTED_A1, TRUSTED_A1_VALUE);
       
       shell = new FsShell(conf);
       
@@ -1722,8 +1729,8 @@
       assertTrue(perm.equals(targetPerm));
       xattrs = fs.getXAttrs(target3);
       assertEquals(xattrs.size(), 2);
-      assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1"));
-      assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1"));
+      assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
+      assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
       acls = fs.getAclStatus(target3).getEntries();
       assertTrue(acls.isEmpty());
       assertFalse(targetPerm.getAclBit());
@@ -1780,6 +1787,160 @@
     }
   }
 
+  @Test (timeout = 120000)
+  public void testCopyCommandsWithRawXAttrs() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+      numDataNodes(1).format(true).build();
+    FsShell shell = null;
+    FileSystem fs = null;
+    final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithRawXAttrs-"
+      + counter.getAndIncrement();
+    final Path hdfsTestDir = new Path(testdir);
+    final Path rawHdfsTestDir = new Path("/.reserved/raw" + testdir);
+    try {
+      fs = cluster.getFileSystem();
+      fs.mkdirs(hdfsTestDir);
+      final Path src = new Path(hdfsTestDir, "srcfile");
+      final String rawSrcBase = "/.reserved/raw" + testdir;
+      final Path rawSrc = new Path(rawSrcBase, "srcfile");
+      fs.create(src).close();
+
+      final Path srcDir = new Path(hdfsTestDir, "srcdir");
+      final Path rawSrcDir = new Path("/.reserved/raw" + testdir, "srcdir");
+      fs.mkdirs(srcDir);
+      final Path srcDirFile = new Path(srcDir, "srcfile");
+      final Path rawSrcDirFile =
+              new Path("/.reserved/raw" + srcDirFile);
+      fs.create(srcDirFile).close();
+
+      final Path[] paths = { rawSrc, rawSrcDir, rawSrcDirFile };
+      final String[] xattrNames = { USER_A1, RAW_A1 };
+      final byte[][] xattrVals = { USER_A1_VALUE, RAW_A1_VALUE };
+
+      for (int i = 0; i < paths.length; i++) {
+        for (int j = 0; j < xattrNames.length; j++) {
+          fs.setXAttr(paths[i], xattrNames[j], xattrVals[j]);
+        }
+      }
+
+      shell = new FsShell(conf);
+
+      /* Check that a file as the source path works ok. */
+      doTestCopyCommandsWithRawXAttrs(shell, fs, src, hdfsTestDir, false);
+      doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrc, hdfsTestDir, false);
+      doTestCopyCommandsWithRawXAttrs(shell, fs, src, rawHdfsTestDir, false);
+      doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrc, rawHdfsTestDir, true);
+
+      /* Use a relative /.reserved/raw path. */
+      final Path savedWd = fs.getWorkingDirectory();
+      try {
+        fs.setWorkingDirectory(new Path(rawSrcBase));
+        final Path relRawSrc = new Path("../srcfile");
+        final Path relRawHdfsTestDir = new Path("..");
+        doTestCopyCommandsWithRawXAttrs(shell, fs, relRawSrc, relRawHdfsTestDir,
+                true);
+      } finally {
+        fs.setWorkingDirectory(savedWd);
+      }
+
+      /* Check that a directory as the source path works ok. */
+      doTestCopyCommandsWithRawXAttrs(shell, fs, srcDir, hdfsTestDir, false);
+      doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrcDir, hdfsTestDir, false);
+      doTestCopyCommandsWithRawXAttrs(shell, fs, srcDir, rawHdfsTestDir, false);
+      doTestCopyCommandsWithRawXAttrs(shell, fs, rawSrcDir, rawHdfsTestDir,
+        true);
+
+      /* Use relative path components inside an absolute /.reserved/raw path. */
+      final String relRawSrcDir = "./.reserved/../.reserved/raw/../raw" +
+          testdir + "/srcdir";
+      final String relRawDstDir = "./.reserved/../.reserved/raw/../raw" +
+          testdir;
+      doTestCopyCommandsWithRawXAttrs(shell, fs, new Path(relRawSrcDir),
+          new Path(relRawDstDir), true);
+    } finally {
+      if (null != shell) {
+        shell.close();
+      }
+
+      if (null != fs) {
+        fs.delete(hdfsTestDir, true);
+        fs.close();
+      }
+      cluster.shutdown();
+    }
+  }
+
+  private void doTestCopyCommandsWithRawXAttrs(FsShell shell, FileSystem fs,
+      Path src, Path hdfsTestDir, boolean expectRaw) throws Exception {
+    Path target;
+    boolean srcIsRaw;
+    if (src.isAbsolute()) {
+      srcIsRaw = src.toString().contains("/.reserved/raw");
+    } else {
+      srcIsRaw = new Path(fs.getWorkingDirectory(), src).
+          toString().contains("/.reserved/raw");
+    }
+    final boolean destIsRaw = hdfsTestDir.toString().contains("/.reserved/raw");
+    final boolean srcDestMismatch = srcIsRaw ^ destIsRaw;
+
+    // -p (possibly preserve raw xattrs if src & dst are both in /.reserved/raw)
+    if (srcDestMismatch) {
+      doCopyAndTest(shell, hdfsTestDir, src, "-p", ERROR);
+    } else {
+      target = doCopyAndTest(shell, hdfsTestDir, src, "-p", SUCCESS);
+      checkXAttrs(fs, target, expectRaw, false);
+    }
+
+    // -px (possibly preserve raw xattrs; always preserve non-raw xattrs)
+    if (srcDestMismatch) {
+      doCopyAndTest(shell, hdfsTestDir, src, "-px", ERROR);
+    } else {
+      target = doCopyAndTest(shell, hdfsTestDir, src, "-px", SUCCESS);
+      checkXAttrs(fs, target, expectRaw, true);
+    }
+
+    // no args (possibly preserve raw xattrs; never preserve non-raw xattrs)
+    if (srcDestMismatch) {
+      doCopyAndTest(shell, hdfsTestDir, src, null, ERROR);
+    } else {
+      target = doCopyAndTest(shell, hdfsTestDir, src, null, SUCCESS);
+      checkXAttrs(fs, target, expectRaw, false);
+    }
+  }
+
+  private Path doCopyAndTest(FsShell shell, Path dest, Path src,
+      String cpArgs, int expectedExitCode) throws Exception {
+    final Path target = new Path(dest, "targetfile" +
+        counter.getAndIncrement());
+    final String[] argv = cpArgs == null ?
+        new String[] { "-cp",         src.toUri().toString(),
+            target.toUri().toString() } :
+        new String[] { "-cp", cpArgs, src.toUri().toString(),
+            target.toUri().toString() };
+    final int ret = ToolRunner.run(shell, argv);
+    assertEquals("cp -p is not working", expectedExitCode, ret);
+    return target;
+  }
+
+  private void checkXAttrs(FileSystem fs, Path target, boolean expectRaw,
+      boolean expectVanillaXAttrs) throws Exception {
+    final Map<String, byte[]> xattrs = fs.getXAttrs(target);
+    int expectedCount = 0;
+    if (expectRaw) {
+      assertArrayEquals("raw.a1 has incorrect value",
+          RAW_A1_VALUE, xattrs.get(RAW_A1));
+      expectedCount++;
+    }
+    if (expectVanillaXAttrs) {
+      assertArrayEquals("user.a1 has incorrect value",
+          USER_A1_VALUE, xattrs.get(USER_A1));
+      expectedCount++;
+    }
+    assertEquals("xattrs size mismatch", expectedCount, xattrs.size());
+  }
+
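+  /*
+   * Illustrative sketch only (hypothetical helper, not referenced by any
+   * test): the shell-level pattern exercised above. "-px" preserves xattrs;
+   * raw.* xattrs are copied only when both the source and the destination
+   * resolve to /.reserved/raw paths.
+   */
+  private static int copyPreservingXAttrs(Configuration conf, Path src,
+      Path dst) throws Exception {
+    final FsShell shell = new FsShell(conf);
+    try {
+      // Equivalent to: hdfs dfs -cp -px <src> <dst>
+      return ToolRunner.run(shell, new String[] { "-cp", "-px",
+          src.toUri().toString(), dst.toUri().toString() });
+    } finally {
+      shell.close();
+    }
+  }
+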
   // verify cp -ptopxa option will preserve directory attributes.
   @Test (timeout = 120000)
   public void testCopyCommandsToDirectoryWithPreserveOption()
@@ -1825,8 +1986,8 @@
       final String group = status.getGroup();
       final FsPermission perm = status.getPermission();
 
-      fs.setXAttr(srcDir, "user.a1", new byte[]{0x31, 0x32, 0x33});
-      fs.setXAttr(srcDir, "trusted.a1", new byte[]{0x31, 0x31, 0x31});
+      fs.setXAttr(srcDir, USER_A1, USER_A1_VALUE);
+      fs.setXAttr(srcDir, TRUSTED_A1, TRUSTED_A1_VALUE);
 
       shell = new FsShell(conf);
 
@@ -1883,8 +2044,8 @@
       assertTrue(perm.equals(targetPerm));
       xattrs = fs.getXAttrs(targetDir3);
       assertEquals(xattrs.size(), 2);
-      assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1"));
-      assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1"));
+      assertArrayEquals(USER_A1_VALUE, xattrs.get(USER_A1));
+      assertArrayEquals(TRUSTED_A1_VALUE, xattrs.get(TRUSTED_A1));
       acls = fs.getAclStatus(targetDir3).getEntries();
       assertTrue(acls.isEmpty());
       assertFalse(targetPerm.getAclBit());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index fb3fcae..caedca0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -104,7 +104,7 @@
     LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);
 
     List<LocatedBlock> ls = Arrays.asList(l1, l2);
-    LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true);
+    LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null);
 
     BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 0982ee2..b71cc32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -38,7 +38,6 @@
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
-import java.util.concurrent.CancellationException;
 
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.logging.impl.Log4JLogger;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
new file mode 100644
index 0000000..1a13332
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java
@@ -0,0 +1,756 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.fs.FSTestWrapper;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestWrapper;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesEqual;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class TestEncryptionZones {
+
+  private Configuration conf;
+  private FileSystemTestHelper fsHelper;
+
+  private MiniDFSCluster cluster;
+  private HdfsAdmin dfsAdmin;
+  private DistributedFileSystem fs;
+  private File testRootDir;
+  private final String TEST_KEY = "testKey";
+
+  protected FileSystemTestWrapper fsWrapper;
+  protected FileContextTestWrapper fcWrapper;
+
+  @Before
+  public void setup() throws Exception {
+    conf = new HdfsConfiguration();
+    fsHelper = new FileSystemTestHelper();
+    // Set up java key store
+    String testRoot = fsHelper.getTestRootDir();
+    testRootDir = new File(testRoot).getAbsoluteFile();
+    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
+        JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks"
+    );
+    // Lower the batch size for testing
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES,
+        2);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
+    fs = cluster.getFileSystem();
+    fsWrapper = new FileSystemTestWrapper(fs);
+    fcWrapper = new FileContextTestWrapper(
+        FileContext.getFileContext(cluster.getURI(), conf));
+    dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+    // Need to set the client's KeyProvider to the NN's for JKS,
+    // else the updates do not get flushed properly
+    fs.getClient().provider = cluster.getNameNode().getNamesystem()
+        .getProvider();
+    // Create a test key
+    DFSTestUtil.createKey(TEST_KEY, cluster, conf);
+  }
+
+  @After
+  public void teardown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+    EncryptionFaultInjector.instance = new EncryptionFaultInjector();
+  }
+
+  public void assertNumZones(final int numZones) throws IOException {
+    RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
+    int count = 0;
+    while (it.hasNext()) {
+      count++;
+      it.next();
+    }
+    assertEquals("Unexpected number of encryption zones!", numZones, count);
+  }
+
+  /**
+   * Checks that an encryption zone with the specified keyName and path (if not
+   * null) is present.
+   *
+   * @throws IOException if listing the encryption zones fails
+   */
+  public void assertZonePresent(String keyName, String path) throws IOException {
+    final RemoteIterator<EncryptionZone> it = dfsAdmin.listEncryptionZones();
+    boolean match = false;
+    while (it.hasNext()) {
+      EncryptionZone zone = it.next();
+      boolean matchKey = (keyName == null);
+      boolean matchPath = (path == null);
+      if (keyName != null && zone.getKeyName().equals(keyName)) {
+        matchKey = true;
+      }
+      if (path != null && zone.getPath().equals(path)) {
+        matchPath = true;
+      }
+      if (matchKey && matchPath) {
+        match = true;
+        break;
+      }
+    }
+    assertTrue("Did not find expected encryption zone with keyName " + keyName +
+            " path " + path, match
+    );
+  }
+
+  @Test(timeout = 60000)
+  public void testBasicOperations() throws Exception {
+
+    int numZones = 0;
+
+    /* Test failure of create EZ on a directory that doesn't exist. */
+    final Path zoneParent = new Path("/zones");
+    final Path zone1 = new Path(zoneParent, "zone1");
+    try {
+      dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
+      fail("expected /test doesn't exist");
+    } catch (IOException e) {
+      assertExceptionContains("cannot find", e);
+    }
+
+    /* Normal creation of an EZ */
+    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
+    assertNumZones(++numZones);
+    assertZonePresent(null, zone1.toString());
+
+    /* Test failure of create EZ on a directory which is already an EZ. */
+    try {
+      dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
+    } catch (IOException e) {
+      assertExceptionContains("already in an encryption zone", e);
+    }
+
+    /* Test failure of create EZ operation in an existing EZ. */
+    final Path zone1Child = new Path(zone1, "child");
+    fsWrapper.mkdir(zone1Child, FsPermission.getDirDefault(), false);
+    try {
+      dfsAdmin.createEncryptionZone(zone1Child, TEST_KEY);
+      fail("EZ in an EZ");
+    } catch (IOException e) {
+      assertExceptionContains("already in an encryption zone", e);
+    }
+
+    /* create EZ on parent of an EZ should fail */
+    try {
+      dfsAdmin.createEncryptionZone(zoneParent, TEST_KEY);
+      fail("EZ over an EZ");
+    } catch (IOException e) {
+      assertExceptionContains("encryption zone for a non-empty directory", e);
+    }
+
+    /* create EZ on a non-empty directory (containing a subdirectory) fails */
+    final Path notEmpty = new Path("/notEmpty");
+    final Path notEmptyChild = new Path(notEmpty, "child");
+    fsWrapper.mkdir(notEmptyChild, FsPermission.getDirDefault(), true);
+    try {
+      dfsAdmin.createEncryptionZone(notEmpty, TEST_KEY);
+      fail("Created EZ on an non-empty directory with folder");
+    } catch (IOException e) {
+      assertExceptionContains("create an encryption zone", e);
+    }
+    fsWrapper.delete(notEmptyChild, false);
+
+    /* create EZ on a non-empty directory (containing a file) fails */
+    fsWrapper.createFile(notEmptyChild);
+    try {
+      dfsAdmin.createEncryptionZone(notEmpty, TEST_KEY);
+      fail("Created EZ on an non-empty directory with file");
+    } catch (IOException e) {
+      assertExceptionContains("create an encryption zone", e);
+    }
+
+    /* Test failure of create EZ on a file. */
+    try {
+      dfsAdmin.createEncryptionZone(notEmptyChild, TEST_KEY);
+      fail("Created EZ on a file");
+    } catch (IOException e) {
+      assertExceptionContains("create an encryption zone for a file.", e);
+    }
+
+    /* Test failure of creating an EZ passing a key that doesn't exist. */
+    final Path zone2 = new Path("/zone2");
+    fsWrapper.mkdir(zone2, FsPermission.getDirDefault(), false);
+    final String myKeyName = "mykeyname";
+    try {
+      dfsAdmin.createEncryptionZone(zone2, myKeyName);
+      fail("expected key doesn't exist");
+    } catch (IOException e) {
+      assertExceptionContains("doesn't exist.", e);
+    }
+
+    /* Test failure of empty and null key name */
+    try {
+      dfsAdmin.createEncryptionZone(zone2, "");
+      fail("created a zone with empty key name");
+    } catch (IOException e) {
+      assertExceptionContains("Must specify a key name when creating", e);
+    }
+    try {
+      dfsAdmin.createEncryptionZone(zone2, null);
+      fail("created a zone with null key name");
+    } catch (IOException e) {
+      assertExceptionContains("Must specify a key name when creating", e);
+    }
+
+    assertNumZones(1);
+
+    /* Test success of creating an EZ when the key exists. */
+    DFSTestUtil.createKey(myKeyName, cluster, conf);
+    dfsAdmin.createEncryptionZone(zone2, myKeyName);
+    assertNumZones(++numZones);
+    assertZonePresent(myKeyName, zone2.toString());
+
+    /* Test failure of create encryption zones as a non super user. */
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] { "mygroup" });
+    final Path nonSuper = new Path("/nonSuper");
+    fsWrapper.mkdir(nonSuper, FsPermission.getDirDefault(), false);
+
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        final HdfsAdmin userAdmin =
+            new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+        try {
+          userAdmin.createEncryptionZone(nonSuper, TEST_KEY);
+          fail("createEncryptionZone is superuser-only operation");
+        } catch (AccessControlException e) {
+          assertExceptionContains("Superuser privilege is required", e);
+        }
+        return null;
+      }
+    });
+
+    // Test success of creating an encryption zone a few levels down.
+    Path deepZone = new Path("/d/e/e/p/zone");
+    fsWrapper.mkdir(deepZone, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(deepZone, TEST_KEY);
+    assertNumZones(++numZones);
+    assertZonePresent(null, deepZone.toString());
+
+    // Create and list some zones to test batching of listEZ
+    for (int i=1; i<6; i++) {
+      final Path zonePath = new Path("/listZone" + i);
+      fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
+      dfsAdmin.createEncryptionZone(zonePath, TEST_KEY);
+      numZones++;
+      assertNumZones(numZones);
+      assertZonePresent(null, zonePath.toString());
+    }
+  }
+
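+  /*
+   * Illustrative sketch only (hypothetical helper, not referenced by any
+   * test): the minimal admin-side sequence for provisioning an encryption
+   * zone, assuming the named key already exists in the cluster's KeyProvider
+   * (see DFSTestUtil.createKey in setup above).
+   */
+  private void provisionZoneSketch(Path zone, String keyName)
+      throws Exception {
+    // The directory must exist and be empty before it can become an EZ.
+    fsWrapper.mkdir(zone, FsPermission.getDirDefault(), true);
+    // Marking the directory as an encryption zone is a superuser operation.
+    dfsAdmin.createEncryptionZone(zone, keyName);
+  }
+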
+  /**
+   * Test listing encryption zones as a non super user.
+   */
+  @Test(timeout = 60000)
+  public void testListEncryptionZonesAsNonSuperUser() throws Exception {
+
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] { "mygroup" });
+
+    final Path testRoot = new Path(fsHelper.getTestRootDir());
+    final Path superPath = new Path(testRoot, "superuseronly");
+    final Path allPath = new Path(testRoot, "accessall");
+
+    fsWrapper.mkdir(superPath, new FsPermission((short) 0700), true);
+    dfsAdmin.createEncryptionZone(superPath, TEST_KEY);
+
+    fsWrapper.mkdir(allPath, new FsPermission((short) 0707), true);
+    dfsAdmin.createEncryptionZone(allPath, TEST_KEY);
+
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        final HdfsAdmin userAdmin =
+            new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+        try {
+          userAdmin.listEncryptionZones();
+        } catch (AccessControlException e) {
+          assertExceptionContains("Superuser privilege is required", e);
+        }
+        return null;
+      }
+    });
+  }
+
+  /**
+   * Test getEncryptionZoneForPath as a non super user.
+   */
+  @Test(timeout = 60000)
+  public void testGetEZAsNonSuperUser() throws Exception {
+
+    final UserGroupInformation user = UserGroupInformation.
+            createUserForTesting("user", new String[] { "mygroup" });
+
+    final Path testRoot = new Path(fsHelper.getTestRootDir());
+    final Path superPath = new Path(testRoot, "superuseronly");
+    final Path superPathFile = new Path(superPath, "file1");
+    final Path allPath = new Path(testRoot, "accessall");
+    final Path allPathFile = new Path(allPath, "file1");
+    final Path nonEZDir = new Path(testRoot, "nonEZDir");
+    final Path nonEZFile = new Path(nonEZDir, "file1");
+    final int len = 8192;
+
+    fsWrapper.mkdir(testRoot, new FsPermission((short) 0777), true);
+    fsWrapper.mkdir(superPath, new FsPermission((short) 0700), false);
+    fsWrapper.mkdir(allPath, new FsPermission((short) 0777), false);
+    fsWrapper.mkdir(nonEZDir, new FsPermission((short) 0777), false);
+    dfsAdmin.createEncryptionZone(superPath, TEST_KEY);
+    dfsAdmin.createEncryptionZone(allPath, TEST_KEY);
+    dfsAdmin.allowSnapshot(new Path("/"));
+    final Path newSnap = fs.createSnapshot(new Path("/"));
+    DFSTestUtil.createFile(fs, superPathFile, len, (short) 1, 0xFEED);
+    DFSTestUtil.createFile(fs, allPathFile, len, (short) 1, 0xFEED);
+    DFSTestUtil.createFile(fs, nonEZFile, len, (short) 1, 0xFEED);
+
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        final HdfsAdmin userAdmin =
+            new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+
+        // Check null arg
+        try {
+          userAdmin.getEncryptionZoneForPath(null);
+          fail("should have thrown NPE");
+        } catch (NullPointerException e) {
+          /*
+           * It would be nice if we could use assertExceptionContains, but
+           * the NPE that is thrown has no message text.
+           */
+        }
+
+        // Check operation with accessible paths
+        assertEquals("expected ez path", allPath.toString(),
+            userAdmin.getEncryptionZoneForPath(allPath).getPath().
+            toString());
+        assertEquals("expected ez path", allPath.toString(),
+            userAdmin.getEncryptionZoneForPath(allPathFile).getPath().
+            toString());
+
+        // Check operation with inaccessible (lack of permissions) path
+        try {
+          userAdmin.getEncryptionZoneForPath(superPathFile);
+          fail("expected AccessControlException");
+        } catch (AccessControlException e) {
+          assertExceptionContains("Permission denied:", e);
+        }
+
+        // Check operation with non-ez paths
+        assertNull("expected null for non-ez path",
+            userAdmin.getEncryptionZoneForPath(nonEZDir));
+        assertNull("expected null for non-ez path",
+            userAdmin.getEncryptionZoneForPath(nonEZFile));
+
+        // Check operation with snapshots
+        String snapshottedAllPath = newSnap.toString() + allPath.toString();
+        assertEquals("expected ez path", allPath.toString(),
+            userAdmin.getEncryptionZoneForPath(
+                new Path(snapshottedAllPath)).getPath().toString());
+
+        /*
+         * Delete the file from the non-snapshot path and verify that it is
+         * still resolvable within the EZ via the snapshot path.
+         */
+        fs.delete(allPathFile, false);
+        assertEquals("expected ez path", allPath.toString(),
+            userAdmin.getEncryptionZoneForPath(
+                new Path(snapshottedAllPath)).getPath().toString());
+
+        // Delete the EZ and make sure the snapshot's EZ is still ok.
+        fs.delete(allPath, true);
+        assertEquals("expected ez path", allPath.toString(),
+            userAdmin.getEncryptionZoneForPath(
+                new Path(snapshottedAllPath)).getPath().toString());
+        assertNull("expected null for deleted file path",
+            userAdmin.getEncryptionZoneForPath(allPathFile));
+        assertNull("expected null for deleted directory path",
+            userAdmin.getEncryptionZoneForPath(allPath));
+        return null;
+      }
+    });
+  }
+
+  /**
+   * Test that renaming a directory out of an encryption zone is rejected.
+   */
+  private void doRenameEncryptionZone(FSTestWrapper wrapper) throws Exception {
+    final Path testRoot = new Path(fsHelper.getTestRootDir());
+    final Path pathFoo = new Path(testRoot, "foo");
+    final Path pathFooBaz = new Path(pathFoo, "baz");
+    wrapper.mkdir(pathFoo, FsPermission.getDirDefault(), true);
+    dfsAdmin.createEncryptionZone(pathFoo, TEST_KEY);
+    wrapper.mkdir(pathFooBaz, FsPermission.getDirDefault(), true);
+    try {
+      wrapper.rename(pathFooBaz, testRoot);
+    } catch (IOException e) {
+      assertExceptionContains(pathFooBaz.toString()
+          + " can't be moved from an encryption zone.", e);
+    }
+  }
+
+  @Test(timeout = 60000)
+  public void testRenameFileSystem() throws Exception {
+    doRenameEncryptionZone(fsWrapper);
+  }
+
+  @Test(timeout = 60000)
+  public void testRenameFileContext() throws Exception {
+    doRenameEncryptionZone(fcWrapper);
+  }
+
+  private FileEncryptionInfo getFileEncryptionInfo(Path path) throws Exception {
+    LocatedBlocks blocks = fs.getClient().getLocatedBlocks(path.toString(), 0);
+    return blocks.getFileEncryptionInfo();
+  }
+
+  @Test(timeout = 120000)
+  public void testReadWrite() throws Exception {
+    final HdfsAdmin dfsAdmin =
+        new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    // Create a base file for comparison
+    final Path baseFile = new Path("/base");
+    final int len = 8192;
+    DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
+    // Create the first enc file
+    final Path zone = new Path("/zone");
+    fs.mkdirs(zone);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY);
+    final Path encFile1 = new Path(zone, "myfile");
+    DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
+    // Read them back in and compare byte-by-byte
+    verifyFilesEqual(fs, baseFile, encFile1, len);
+    // Roll the key of the encryption zone
+    assertNumZones(1);
+    String keyName = dfsAdmin.listEncryptionZones().next().getKeyName();
+    cluster.getNamesystem().getProvider().rollNewVersion(keyName);
+    // Read them back in and compare byte-by-byte
+    verifyFilesEqual(fs, baseFile, encFile1, len);
+    // Write a new enc file and validate
+    final Path encFile2 = new Path(zone, "myfile2");
+    DFSTestUtil.createFile(fs, encFile2, len, (short) 1, 0xFEED);
+    // FEInfos should be different
+    FileEncryptionInfo feInfo1 = getFileEncryptionInfo(encFile1);
+    FileEncryptionInfo feInfo2 = getFileEncryptionInfo(encFile2);
+    assertFalse("EDEKs should be different", Arrays
+        .equals(feInfo1.getEncryptedDataEncryptionKey(),
+            feInfo2.getEncryptedDataEncryptionKey()));
+    assertNotEquals("Key was rolled, versions should be different",
+        feInfo1.getEzKeyVersionName(), feInfo2.getEzKeyVersionName());
+    // Contents still equal
+    verifyFilesEqual(fs, encFile1, encFile2, len);
+  }
+
+  @Test(timeout = 60000)
+  public void testCipherSuiteNegotiation() throws Exception {
+    final HdfsAdmin dfsAdmin =
+        new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    final Path zone = new Path("/zone");
+    fs.mkdirs(zone);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY);
+    // Create a file in an EZ, which should succeed
+    DFSTestUtil
+        .createFile(fs, new Path(zone, "success1"), 0, (short) 1, 0xFEED);
+    // Pass no cipherSuites, fail
+    fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(0);
+    try {
+      DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, 0xFEED);
+      fail("Created a file without specifying a CipherSuite!");
+    } catch (UnknownCipherSuiteException e) {
+      assertExceptionContains("No cipher suites", e);
+    }
+    // Pass some unknown cipherSuites, fail
+    fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3);
+    fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
+    fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
+    fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
+    try {
+      DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, 0xFEED);
+      fail("Created a file without specifying a CipherSuite!");
+    } catch (UnknownCipherSuiteException e) {
+      assertExceptionContains("No cipher suites", e);
+    }
+    // Pass some unknown and a good cipherSuites, success
+    fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3);
+    fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING);
+    fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
+    fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
+    DFSTestUtil
+        .createFile(fs, new Path(zone, "success2"), 0, (short) 1, 0xFEED);
+    fs.getClient().cipherSuites = Lists.newArrayListWithCapacity(3);
+    fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
+    fs.getClient().cipherSuites.add(CipherSuite.UNKNOWN);
+    fs.getClient().cipherSuites.add(CipherSuite.AES_CTR_NOPADDING);
+    DFSTestUtil
+        .createFile(fs, new Path(zone, "success3"), 4096, (short) 1, 0xFEED);
+    // Check KeyProvider state
+    // Flush the KeyProvider on the NN (it caches), then init a test provider
+    cluster.getNamesystem().getProvider().flush();
+    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
+    List<String> keys = provider.getKeys();
+    assertEquals("Expected NN to have created one key per zone", 1,
+        keys.size());
+    List<KeyProvider.KeyVersion> allVersions = Lists.newArrayList();
+    for (String key : keys) {
+      List<KeyProvider.KeyVersion> versions = provider.getKeyVersions(key);
+      assertEquals("Should only have one key version per key", 1,
+          versions.size());
+      allVersions.addAll(versions);
+    }
+    // Check that the specified CipherSuite was correctly saved on the NN
+    for (int i = 2; i <= 3; i++) {
+      FileEncryptionInfo feInfo =
+          getFileEncryptionInfo(new Path(zone.toString() +
+              "/success" + i));
+      assertEquals(feInfo.getCipherSuite(), CipherSuite.AES_CTR_NOPADDING);
+    }
+  }
+
+  @Test(timeout = 120000)
+  public void testCreateEZWithNoProvider() throws Exception {
+    // Unset the key provider and make sure EZ ops don't work
+    final Configuration clusterConf = cluster.getConfiguration(0);
+    clusterConf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "");
+    cluster.restartNameNode(true);
+    cluster.waitActive();
+    final Path zone1 = new Path("/zone1");
+    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
+    try {
+      dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
+      fail("expected exception");
+    } catch (IOException e) {
+      assertExceptionContains("since no key provider is available", e);
+    }
+    clusterConf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
+        JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks"
+    );
+    // Try listing EZs as well
+    assertNumZones(0);
+  }
+
+  private class MyInjector extends EncryptionFaultInjector {
+    int generateCount;
+    CountDownLatch ready;
+    CountDownLatch wait;
+
+    public MyInjector() {
+      this.ready = new CountDownLatch(1);
+      this.wait = new CountDownLatch(1);
+    }
+
+    @Override
+    public void startFileAfterGenerateKey() throws IOException {
+      ready.countDown();
+      try {
+        wait.await();
+      } catch (InterruptedException e) {
+        throw new IOException(e);
+      }
+      generateCount++;
+    }
+  }
+
+  private class CreateFileTask implements Callable<Void> {
+    private FileSystemTestWrapper fsWrapper;
+    private Path name;
+
+    CreateFileTask(FileSystemTestWrapper fsWrapper, Path name) {
+      this.fsWrapper = fsWrapper;
+      this.name = name;
+    }
+
+    @Override
+    public Void call() throws Exception {
+      fsWrapper.createFile(name);
+      return null;
+    }
+  }
+
+  private class InjectFaultTask implements Callable<Void> {
+    final Path zone1 = new Path("/zone1");
+    final Path file = new Path(zone1, "file1");
+    final ExecutorService executor = Executors.newSingleThreadExecutor();
+
+    MyInjector injector;
+
+    @Override
+    public Void call() throws Exception {
+      // Set up the injector
+      injector = new MyInjector();
+      EncryptionFaultInjector.instance = injector;
+      Future<Void> future =
+          executor.submit(new CreateFileTask(fsWrapper, file));
+      injector.ready.await();
+      // Do the fault
+      doFault();
+      // Allow create to proceed
+      injector.wait.countDown();
+      future.get();
+      // Cleanup and postconditions
+      doCleanup();
+      return null;
+    }
+
+    public void doFault() throws Exception {}
+
+    public void doCleanup() throws Exception {}
+  }
+
+  /**
+   * Tests the retry logic in startFile. We release the lock while generating
+   * an EDEK, so tricky things can happen in the intervening time.
+   */
+  @Test(timeout = 120000)
+  public void testStartFileRetry() throws Exception {
+    final Path zone1 = new Path("/zone1");
+    final Path file = new Path(zone1, "file1");
+    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
+    ExecutorService executor = Executors.newSingleThreadExecutor();
+
+    // Test when the parent directory becomes an EZ
+    executor.submit(new InjectFaultTask() {
+      @Override
+      public void doFault() throws Exception {
+        dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
+      }
+      @Override
+      public void doCleanup() throws Exception {
+        assertEquals("Expected a startFile retry", 2, injector.generateCount);
+        fsWrapper.delete(file, false);
+      }
+    }).get();
+
+    // Test when the parent directory stops being an EZ
+    executor.submit(new InjectFaultTask() {
+      @Override
+      public void doFault() throws Exception {
+        fsWrapper.delete(zone1, true);
+      }
+      @Override
+      public void doCleanup() throws Exception {
+        assertEquals("Expected no startFile retries", 1, injector.generateCount);
+        fsWrapper.delete(file, false);
+      }
+    }).get();
+
+    // Test when the parent directory becomes a different EZ
+    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
+    final String otherKey = "otherKey";
+    DFSTestUtil.createKey(otherKey, cluster, conf);
+    dfsAdmin.createEncryptionZone(zone1, TEST_KEY);
+
+    executor.submit(new InjectFaultTask() {
+      @Override
+      public void doFault() throws Exception {
+        fsWrapper.delete(zone1, true);
+        fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
+        dfsAdmin.createEncryptionZone(zone1, otherKey);
+      }
+      @Override
+      public void doCleanup() throws Exception {
+        assertEquals("Expected a startFile retry", 2, injector.generateCount);
+        fsWrapper.delete(zone1, true);
+      }
+    }).get();
+
+    // Test that the retry limit leads to an error
+    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
+    final String anotherKey = "anotherKey";
+    DFSTestUtil.createKey(anotherKey, cluster, conf);
+    dfsAdmin.createEncryptionZone(zone1, anotherKey);
+    String keyToUse = otherKey;
+
+    MyInjector injector = new MyInjector();
+    EncryptionFaultInjector.instance = injector;
+    Future<?> future = executor.submit(new CreateFileTask(fsWrapper, file));
+
+    // Flip-flop between two EZs to repeatedly fail
+    for (int i=0; i<10; i++) {
+      injector.ready.await();
+      fsWrapper.delete(zone1, true);
+      fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
+      dfsAdmin.createEncryptionZone(zone1, keyToUse);
+      if (keyToUse == otherKey) {
+        keyToUse = anotherKey;
+      } else {
+        keyToUse = otherKey;
+      }
+      injector.wait.countDown();
+      injector = new MyInjector();
+      EncryptionFaultInjector.instance = injector;
+    }
+    try {
+      future.get();
+      fail("Expected exception from too many retries");
+    } catch (ExecutionException e) {
+      assertExceptionContains(
+          "Too many retries because of encryption zone operations",
+          e.getCause());
+    }
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 809e592..c8b7df2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -1142,7 +1142,7 @@
           try {
             nnrpc.create(pathStr, new FsPermission((short)0755), "client",
                 new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
-                true, (short)1, 128*1024*1024L);
+                true, (short)1, 128*1024*1024L, null);
             fail("Should have thrown exception when creating '"
                 + pathStr + "'" + " by " + method);
           } catch (InvalidPathException ipe) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index b8cab89..28c253f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.mockito.Matchers.anyList;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Matchers.anyShort;
 import static org.mockito.Matchers.anyLong;
@@ -29,10 +30,12 @@
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -339,16 +342,16 @@
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010, 0)).when(mcp).getFileInfo(anyString());
+            1010, 0, null)).when(mcp).getFileInfo(anyString());
     Mockito
         .doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010, 0))
+                1010, 0, null))
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
-            anyShort(), anyLong());
+            anyShort(), anyLong(), (List<CipherSuite>) anyList());
 
     final Configuration conf = new Configuration();
     final DFSClient c1 = createDFSClientAs(ugi[0], conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
new file mode 100644
index 0000000..2a20954
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
@@ -0,0 +1,350 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestWrapper;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FileSystemTestWrapper;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesEqual;
+import static org.apache.hadoop.hdfs.DFSTestUtil.verifyFilesNotEqual;
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.apache.hadoop.test.GenericTestUtils.assertMatches;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+public class TestReservedRawPaths {
+
+  private Configuration conf;
+  private FileSystemTestHelper fsHelper;
+
+  private MiniDFSCluster cluster;
+  private HdfsAdmin dfsAdmin;
+  private DistributedFileSystem fs;
+  private final String TEST_KEY = "testKey";
+
+  protected FileSystemTestWrapper fsWrapper;
+  protected FileContextTestWrapper fcWrapper;
+
+  @Before
+  public void setup() throws Exception {
+    conf = new HdfsConfiguration();
+    fsHelper = new FileSystemTestHelper();
+    // Set up java key store
+    String testRoot = fsHelper.getTestRootDir();
+    File testRootDir = new File(testRoot).getAbsoluteFile();
+    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
+        JavaKeyStoreProvider.SCHEME_NAME + "://file" + testRootDir + "/test.jks"
+    );
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
+    fs = cluster.getFileSystem();
+    fsWrapper = new FileSystemTestWrapper(cluster.getFileSystem());
+    fcWrapper = new FileContextTestWrapper(
+        FileContext.getFileContext(cluster.getURI(), conf));
+    dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+    // Need to set the client's KeyProvider to the NN's for JKS,
+    // else the updates do not get flushed properly
+    fs.getClient().provider = cluster.getNameNode().getNamesystem()
+        .getProvider();
+    DFSTestUtil.createKey(TEST_KEY, cluster, conf);
+  }
+
+  @After
+  public void teardown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Basic read/write tests of raw files.
+   * Create a non-encrypted file
+   * Create an encryption zone
+   * Verify that non-encrypted file contents and decrypted file in EZ are equal
+   * Compare the raw encrypted bytes of the file with the decrypted version to
+   *   ensure they're different
+   * Compare the raw and non-raw versions of the non-encrypted file to ensure
+   *   they're the same.
+   */
+  @Test(timeout = 120000)
+  public void testReadWriteRaw() throws Exception {
+    // Create a base file for comparison
+    final Path baseFile = new Path("/base");
+    final int len = 8192;
+    DFSTestUtil.createFile(fs, baseFile, len, (short) 1, 0xFEED);
+    // Create the first enc file
+    final Path zone = new Path("/zone");
+    fs.mkdirs(zone);
+    dfsAdmin.createEncryptionZone(zone, TEST_KEY);
+    final Path encFile1 = new Path(zone, "myfile");
+    DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
+    // Read them back in and compare byte-by-byte
+    verifyFilesEqual(fs, baseFile, encFile1, len);
+    // Raw file should be different from encrypted file
+    final Path encFile1Raw = new Path(zone, "/.reserved/raw/zone/myfile");
+    verifyFilesNotEqual(fs, encFile1Raw, encFile1, len);
+    // Raw file should be same as /base which is not in an EZ
+    final Path baseFileRaw = new Path(zone, "/.reserved/raw/base");
+    verifyFilesEqual(fs, baseFile, baseFileRaw, len);
+  }
+
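+  /*
+   * Illustrative sketch only (hypothetical helper, not referenced by any
+   * test): reading a file through /.reserved/raw returns the bytes as stored
+   * (encrypted if the file is in an EZ), while the normal path returns
+   * decrypted data.
+   */
+  private byte[] readRawBytes(Path file, int len) throws IOException {
+    final Path raw = new Path("/.reserved/raw" + file.toUri().getPath());
+    final byte[] buf = new byte[len];
+    final org.apache.hadoop.fs.FSDataInputStream in = fs.open(raw);
+    try {
+      in.readFully(buf);
+    } finally {
+      in.close();
+    }
+    return buf;
+  }
+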
+  private void assertPathEquals(Path p1, Path p2) throws IOException {
+    final FileStatus p1Stat = fs.getFileStatus(p1);
+    final FileStatus p2Stat = fs.getFileStatus(p2);
+
+    /*
+     * Use accessTime and modificationTime as substitutes for INode to check
+     * for resolution to the same underlying file.
+     */
+    assertEquals("Access times not equal", p1Stat.getAccessTime(),
+        p2Stat.getAccessTime());
+    assertEquals("Modification times not equal", p1Stat.getModificationTime(),
+        p2Stat.getModificationTime());
+    assertEquals("pathname1 not equal", p1,
+        Path.getPathWithoutSchemeAndAuthority(p1Stat.getPath()));
+    assertEquals("pathname1 not equal", p2,
+            Path.getPathWithoutSchemeAndAuthority(p2Stat.getPath()));
+  }
+
+  /**
+   * Tests that getFileStatus on raw and non raw resolve to the same
+   * file.
+   */
+  @Test(timeout = 120000)
+  public void testGetFileStatus() throws Exception {
+    final Path zone = new Path("zone");
+    final Path slashZone = new Path("/", zone);
+    fs.mkdirs(slashZone);
+    dfsAdmin.createEncryptionZone(slashZone, TEST_KEY);
+
+    final Path base = new Path("base");
+    final Path reservedRaw = new Path("/.reserved/raw");
+    final Path baseRaw = new Path(reservedRaw, base);
+    final int len = 8192;
+    DFSTestUtil.createFile(fs, baseRaw, len, (short) 1, 0xFEED);
+    assertPathEquals(new Path("/", base), baseRaw);
+
+    /* Repeat the test for a file in an ez. */
+    final Path ezEncFile = new Path(slashZone, base);
+    final Path ezRawEncFile =
+        new Path(new Path(reservedRaw, zone), base);
+    DFSTestUtil.createFile(fs, ezEncFile, len, (short) 1, 0xFEED);
+    assertPathEquals(ezEncFile, ezRawEncFile);
+  }
+
+  @Test(timeout = 120000)
+  public void testReservedRoot() throws Exception {
+    final Path root = new Path("/");
+    final Path rawRoot = new Path("/.reserved/raw");
+    final Path rawRootSlash = new Path("/.reserved/raw/");
+    assertPathEquals(root, rawRoot);
+    assertPathEquals(root, rawRootSlash);
+  }
+
+  /* Verify mkdir works ok in .reserved/raw directory. */
+  @Test(timeout = 120000)
+  public void testReservedRawMkdir() throws Exception {
+    final Path zone = new Path("zone");
+    final Path slashZone = new Path("/", zone);
+    fs.mkdirs(slashZone);
+    dfsAdmin.createEncryptionZone(slashZone, TEST_KEY);
+    final Path rawRoot = new Path("/.reserved/raw");
+    final Path dir1 = new Path("dir1");
+    final Path rawDir1 = new Path(rawRoot, dir1);
+    fs.mkdirs(rawDir1);
+    assertPathEquals(rawDir1, new Path("/", dir1));
+    fs.delete(rawDir1, true);
+    final Path rawZone = new Path(rawRoot, zone);
+    final Path rawDir1EZ = new Path(rawZone, dir1);
+    fs.mkdirs(rawDir1EZ);
+    assertPathEquals(rawDir1EZ, new Path(slashZone, dir1));
+    fs.delete(rawDir1EZ, true);
+  }
+
+  @Test(timeout = 120000)
+  public void testRelativePathnames() throws Exception {
+    final Path baseFileRaw = new Path("/.reserved/raw/base");
+    final int len = 8192;
+    DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED);
+
+    final Path root = new Path("/");
+    final Path rawRoot = new Path("/.reserved/raw");
+    assertPathEquals(root, new Path(rawRoot, "../raw"));
+    assertPathEquals(root, new Path(rawRoot, "../../.reserved/raw"));
+    assertPathEquals(baseFileRaw, new Path(rawRoot, "../raw/base"));
+    assertPathEquals(baseFileRaw, new Path(rawRoot,
+        "../../.reserved/raw/base"));
+    assertPathEquals(baseFileRaw, new Path(rawRoot,
+        "../../.reserved/raw/base/../base"));
+    assertPathEquals(baseFileRaw, new Path(
+        "/.reserved/../.reserved/raw/../raw/base"));
+  }
+
+  @Test(timeout = 120000)
+  public void testAdminAccessOnly() throws Exception {
+    final Path zone = new Path("zone");
+    final Path slashZone = new Path("/", zone);
+    fs.mkdirs(slashZone);
+    dfsAdmin.createEncryptionZone(slashZone, TEST_KEY);
+    final Path base = new Path("base");
+    final Path reservedRaw = new Path("/.reserved/raw");
+    final int len = 8192;
+
+    /* Test failure of create file in reserved/raw as non admin */
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] { "mygroup" });
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        final DistributedFileSystem fs = cluster.getFileSystem();
+        try {
+          final Path ezRawEncFile =
+              new Path(new Path(reservedRaw, zone), base);
+          DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
+          fail("access to /.reserved/raw is superuser-only operation");
+        } catch (AccessControlException e) {
+          assertExceptionContains("Superuser privilege is required", e);
+        }
+        return null;
+      }
+    });
+
+    /* Test failure of getFileStatus in reserved/raw as non admin */
+    final Path ezRawEncFile = new Path(new Path(reservedRaw, zone), base);
+    DFSTestUtil.createFile(fs, ezRawEncFile, len, (short) 1, 0xFEED);
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        final DistributedFileSystem fs = cluster.getFileSystem();
+        try {
+          fs.getFileStatus(ezRawEncFile);
+          fail("access to /.reserved/raw is superuser-only operation");
+        } catch (AccessControlException e) {
+          assertExceptionContains("Superuser privilege is required", e);
+        }
+        return null;
+      }
+    });
+
+    /* Test failure of listStatus in reserved/raw as non admin */
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        final DistributedFileSystem fs = cluster.getFileSystem();
+        try {
+          fs.listStatus(ezRawEncFile);
+          fail("access to /.reserved/raw is superuser-only operation");
+        } catch (AccessControlException e) {
+          assertExceptionContains("Superuser privilege is required", e);
+        }
+        return null;
+      }
+    });
+
+    fs.setPermission(new Path("/"), new FsPermission((short) 0777));
+    /* Test failure of mkdir in reserved/raw as non admin */
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        final DistributedFileSystem fs = cluster.getFileSystem();
+        final Path d1 = new Path(reservedRaw, "dir1");
+        try {
+          fs.mkdirs(d1);
+          fail("access to /.reserved/raw is superuser-only operation");
+        } catch (AccessControlException e) {
+          assertExceptionContains("Superuser privilege is required", e);
+        }
+        return null;
+      }
+    });
+  }
+
+  @Test(timeout = 120000)
+  public void testListDotReserved() throws Exception {
+    // Create a base file for comparison
+    final Path baseFileRaw = new Path("/.reserved/raw/base");
+    final int len = 8192;
+    DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED);
+
+    /*
+     * Ensure that you can't list /.reserved. Ever.
+     */
+    try {
+      fs.listStatus(new Path("/.reserved"));
+      fail("expected FNFE");
+    } catch (FileNotFoundException e) {
+      assertExceptionContains("/.reserved does not exist", e);
+    }
+
+    try {
+      fs.listStatus(new Path("/.reserved/.inodes"));
+      fail("expected FNFE");
+    } catch (FileNotFoundException e) {
+      assertExceptionContains(
+              "/.reserved/.inodes does not exist", e);
+    }
+
+    final FileStatus[] fileStatuses = fs.listStatus(new Path("/.reserved/raw"));
+    assertEquals("expected 1 entry", fileStatuses.length, 1);
+    assertMatches(fileStatuses[0].getPath().toString(), "/.reserved/raw/base");
+  }
+
+  @Test(timeout = 120000)
+  public void testListRecursive() throws Exception {
+    Path rootPath = new Path("/");
+    Path p = rootPath;
+    for (int i = 0; i < 3; i++) {
+      p = new Path(p, "dir" + i);
+      fs.mkdirs(p);
+    }
+
+    Path curPath = new Path("/.reserved/raw");
+    int cnt = 0;
+    FileStatus[] fileStatuses = fs.listStatus(curPath);
+    while (fileStatuses != null && fileStatuses.length > 0) {
+      FileStatus f = fileStatuses[0];
+      assertMatches(f.getPath().toString(), "/.reserved/raw");
+      curPath = Path.getPathWithoutSchemeAndAuthority(f.getPath());
+      cnt++;
+      fileStatuses = fs.listStatus(curPath);
+    }
+    assertEquals(3, cnt);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java
new file mode 100644
index 0000000..35c13e6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/crypto/TestHdfsCryptoStreams.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.crypto;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoStreamsTestBase;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.crypto.CryptoFSDataInputStream;
+import org.apache.hadoop.fs.crypto.CryptoFSDataOutputStream;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+public class TestHdfsCryptoStreams extends CryptoStreamsTestBase {
+  private static MiniDFSCluster dfsCluster;
+  private static FileSystem fs;
+  private static int pathCount = 0;
+  private static Path path;
+  private static Path file;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    dfsCluster = new MiniDFSCluster.Builder(conf).build();
+    dfsCluster.waitClusterUp();
+    fs = dfsCluster.getFileSystem();
+    codec = CryptoCodec.getInstance(conf);
+  }
+
+  @AfterClass
+  public static void shutdown() throws Exception {
+    if (dfsCluster != null) {
+      dfsCluster.shutdown();
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws IOException {
+    ++pathCount;
+    path = new Path("/p" + pathCount);
+    file = new Path(path, "file");
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0700));
+
+    super.setUp();
+  }
+
+  @After
+  public void cleanUp() throws IOException {
+    fs.delete(path, true);
+  }
+
+  @Override
+  protected OutputStream getOutputStream(int bufferSize, byte[] key, byte[] iv)
+      throws IOException {
+    return new CryptoFSDataOutputStream(fs.create(file), codec, bufferSize,
+        key, iv);
+  }
+
+  @Override
+  protected InputStream getInputStream(int bufferSize, byte[] key, byte[] iv)
+      throws IOException {
+    return new CryptoFSDataInputStream(fs.open(file), codec, bufferSize, key,
+        iv);
+  }
+}
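
[Editor's sketch, not part of the patch] The two factory methods above capture the whole contract of the crypto stream wrappers: wrap fs.create()/fs.open() in CryptoFSDataOutputStream/CryptoFSDataInputStream with the same codec, buffer size, key and IV on both sides. A standalone version of the same round trip, assuming a reachable HDFS and a hypothetical /tmp/crypto-demo path:

    import java.io.InputStream;
    import java.io.OutputStream;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.CryptoCodec;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.crypto.CryptoFSDataInputStream;
    import org.apache.hadoop.fs.crypto.CryptoFSDataOutputStream;

    public class CryptoStreamSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        CryptoCodec codec = CryptoCodec.getInstance(conf);
        Path file = new Path("/tmp/crypto-demo");      // illustrative path

        // AES-CTR uses a 16-byte key and a block-sized IV; both sides must agree.
        byte[] key = new byte[16];
        byte[] iv = new byte[codec.getCipherSuite().getAlgorithmBlockSize()];
        codec.generateSecureRandom(key);
        codec.generateSecureRandom(iv);

        // Encrypt while writing to HDFS.
        OutputStream out =
            new CryptoFSDataOutputStream(fs.create(file), codec, 8192, key, iv);
        out.write("hello".getBytes("UTF-8"));
        out.close();

        // Decrypt while reading back with the same key and IV.
        InputStream in =
            new CryptoFSDataInputStream(fs.open(file), codec, 8192, key, iv);
        byte[] buf = new byte[5];
        in.read(buf);
        in.close();
        System.out.println(new String(buf, "UTF-8"));  // prints "hello"
      }
    }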
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
index 636ecc2..0c7b807 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
@@ -69,6 +69,7 @@
   protected static Configuration conf;
   private static int pathCount = 0;
   protected static Path path;
+  protected static Path rawPath;
   
   // XAttrs
   protected static final String name1 = "user.a1";
@@ -78,6 +79,8 @@
   protected static final byte[] value2 = {0x37, 0x38, 0x39};
   protected static final String name3 = "user.a3";
   protected static final String name4 = "user.a4";
+  protected static final String raw1 = "raw.a1";
+  protected static final String raw2 = "raw.a2";
 
   protected FileSystem fs;
 
@@ -107,6 +110,7 @@
   public void setUp() throws Exception {
     pathCount += 1;
     path = new Path("/p" + pathCount);
+    rawPath = new Path("/.reserved/raw/p" + pathCount);
     initFileSystem();
   }
 
@@ -395,7 +399,8 @@
       Assert.fail("expected IOException");
     } catch (Exception e) {
       GenericTestUtils.assertExceptionContains
-          ("An XAttr name must be prefixed with user/trusted/security/system, " +
+          ("An XAttr name must be prefixed with " +
+           "user/trusted/security/system/raw, " +
            "followed by a '.'",
           e);
     }
@@ -582,7 +587,7 @@
 
     /* Unknown namespace should throw an exception. */
     final String expectedExceptionString = "An XAttr name must be prefixed " +
-        "with user/trusted/security/system, followed by a '.'";
+        "with user/trusted/security/system/raw, followed by a '.'";
     try {
       fs.removeXAttr(path, "wackynamespace.foo");
       Assert.fail("expected IOException");
@@ -918,6 +923,176 @@
     fsAsDiana.removeXAttr(path, name2);
   }
   
+  @Test(timeout = 120000)
+  public void testRawXAttrs() throws Exception {
+    final UserGroupInformation user = UserGroupInformation.
+      createUserForTesting("user", new String[] {"mygroup"});
+
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
+    fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE,
+        XAttrSetFlag.REPLACE));
+
+    {
+      // getXAttr
+      final byte[] value = fs.getXAttr(rawPath, raw1);
+      Assert.assertArrayEquals(value, value1);
+    }
+
+    {
+      // getXAttrs
+      final Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
+      Assert.assertEquals(xattrs.size(), 1);
+      Assert.assertArrayEquals(value1, xattrs.get(raw1));
+      fs.removeXAttr(rawPath, raw1);
+    }
+
+    {
+      // replace and re-get
+      fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+      fs.setXAttr(rawPath, raw1, newValue1, EnumSet.of(XAttrSetFlag.CREATE,
+          XAttrSetFlag.REPLACE));
+
+      final Map<String,byte[]> xattrs = fs.getXAttrs(rawPath);
+      Assert.assertEquals(xattrs.size(), 1);
+      Assert.assertArrayEquals(newValue1, xattrs.get(raw1));
+
+      fs.removeXAttr(rawPath, raw1);
+    }
+
+    {
+      // listXAttrs on rawPath ensuring raw.* xattrs are returned
+      fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+      fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+
+      final List<String> xattrNames = fs.listXAttrs(rawPath);
+      assertTrue(xattrNames.contains(raw1));
+      assertTrue(xattrNames.contains(raw2));
+      assertTrue(xattrNames.size() == 2);
+      fs.removeXAttr(rawPath, raw1);
+      fs.removeXAttr(rawPath, raw2);
+    }
+
+    {
+      // listXAttrs on non-rawPath ensuring no raw.* xattrs returned
+      fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+      fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+
+      final List<String> xattrNames = fs.listXAttrs(path);
+      assertTrue(xattrNames.size() == 0);
+      fs.removeXAttr(rawPath, raw1);
+      fs.removeXAttr(rawPath, raw2);
+    }
+
+    {
+      /*
+       * Test non-root user operations in the "raw.*" namespace.
+       */
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          // Test that non-root cannot set xattrs in the "raw.*" namespace
+          try {
+            // non-raw path
+            userFs.setXAttr(path, raw1, value1);
+            fail("setXAttr should have thrown");
+          } catch (AccessControlException e) {
+            // ignore
+          }
+
+          try {
+            // raw path
+            userFs.setXAttr(rawPath, raw1, value1);
+            fail("setXAttr should have thrown");
+          } catch (AccessControlException e) {
+            // ignore
+          }
+
+          // Test that non-root cannot call getXAttrs in the "raw.*" namespace
+          try {
+            // raw path
+            userFs.getXAttrs(rawPath);
+            fail("getXAttrs should have thrown");
+          } catch (AccessControlException e) {
+            // ignore
+          }
+
+          try {
+            // non-raw path
+            userFs.getXAttrs(path);
+            fail("getXAttrs should have thrown");
+          } catch (AccessControlException e) {
+            // ignore
+          }
+
+          // Test that non-root cannot call getXAttr in the "raw.*" namespace
+          try {
+            // raw path
+            userFs.getXAttr(rawPath, raw1);
+            fail("getXAttr should have thrown");
+          } catch (AccessControlException e) {
+            // ignore
+          }
+
+          try {
+            // non-raw path
+            userFs.getXAttr(path, raw1);
+            fail("getXAttr should have thrown");
+          } catch (AccessControlException e) {
+            // ignore
+          }
+          return null;
+        }
+        });
+    }
+
+    {
+      /*
+       * Test that a raw.* xattr set by the superuser cannot be read by
+       * non-root via getXAttr, and is hidden from non-root's listXAttrs.
+       */
+      fs.setXAttr(rawPath, raw1, value1);
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            try {
+              // raw path
+              userFs.getXAttr(rawPath, raw1);
+              fail("getXAttr should have thrown");
+            } catch (AccessControlException e) {
+              // ignore
+            }
+
+            try {
+              // non-raw path
+              userFs.getXAttr(path, raw1);
+              fail("getXAttr should have thrown");
+            } catch (AccessControlException e) {
+              // ignore
+            }
+
+            /*
+             * Test that only root can see raw.* xattrs returned from listXAttrs
+             * and non-root can't do listXAttrs on /.reserved/raw.
+             */
+            // non-raw path
+            final List<String> xattrNames = userFs.listXAttrs(path);
+            assertTrue(xattrNames.size() == 0);
+            try {
+              // raw path
+              userFs.listXAttrs(rawPath);
+              fail("listXAttrs on raw path should have thrown");
+            } catch (AccessControlException e) {
+              // ignore
+            }
+
+            return null;
+          }
+        });
+      fs.removeXAttr(rawPath, raw1);
+    }
+  }
+
   /**
    * Creates a FileSystem for the super-user.
    *
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 7279aff..1fb1c1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -587,7 +587,8 @@
       // dummyActionNoSynch(fileIdx);
       nameNodeProto.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(),
                       clientName, new EnumSetWritable<CreateFlag>(EnumSet
-              .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE);
+              .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, 
+          replication, BLOCK_SIZE, null);
       long end = Time.now();
       for(boolean written = !closeUponCreate; !written; 
         written = nameNodeProto.complete(fileNames[daemonId][inputIdx],
@@ -1133,7 +1134,7 @@
         String fileName = nameGenerator.getNextFileName("ThroughputBench");
         nameNodeProto.create(fileName, FsPermission.getDefault(), clientName,
             new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, replication,
-            BLOCK_SIZE);
+            BLOCK_SIZE, null);
         ExtendedBlock lastBlock = addBlocks(fileName, clientName);
         nameNodeProto.complete(fileName, clientName, lastBlock, INodeId.GRANDFATHER_INODE_ID);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
index 08c44c2..5153e76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
@@ -128,7 +128,7 @@
     nn.create(src, FsPermission.getFileDefault(),
         "clientName",
         new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
-        true, (short)3, 1024);
+        true, (short)3, 1024, null);
 
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
@@ -155,7 +155,7 @@
     // create file
     nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
         new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
-        (short) 3, 1024);
+        (short) 3, 1024, null);
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
     LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
index 011901d..ad067cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
@@ -191,14 +191,19 @@
     existingXAttrs.add(xAttr1);
     existingXAttrs.add(xAttr2);
     
-    // Adding a system namespace xAttr, isn't affected by inode xAttrs limit.
-    XAttr newXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).
+    // Adding system and raw namespace xAttrs aren't affected by inode
+    // xAttrs limit.
+    XAttr newSystemXAttr = (new XAttr.Builder()).
+        setNameSpace(XAttr.NameSpace.SYSTEM).setName("a3").
+        setValue(new byte[]{0x33, 0x33, 0x33}).build();
+    XAttr newRawXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.RAW).
         setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build();
-    List<XAttr> newXAttrs = Lists.newArrayListWithCapacity(1);
-    newXAttrs.add(newXAttr);
+    List<XAttr> newXAttrs = Lists.newArrayListWithCapacity(2);
+    newXAttrs.add(newSystemXAttr);
+    newXAttrs.add(newRawXAttr);
     List<XAttr> xAttrs = fsdir.setINodeXAttrs(existingXAttrs, newXAttrs,
         EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
-    assertEquals(xAttrs.size(), 3);
+    assertEquals(xAttrs.size(), 4);
     
     // Adding a trusted namespace xAttr, is affected by inode xAttrs limit.
     XAttr newXAttr1 = (new XAttr.Builder()).setNameSpace(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 1a4af42..4cddd60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -1018,7 +1018,7 @@
 
     HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
         blockSize, modTime, accessTime, perms, owner, group, symlink, path,
-        fileId, numChildren);
+        fileId, numChildren, null);
     Result res = new Result(conf);
 
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
index a0ae43b..6559b50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
@@ -209,19 +209,20 @@
     // Two retried calls succeed
     newCall();
     HdfsFileStatus status = namesystem.startFile(src, perm, "holder",
-        "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize);
+        "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, 
+        BlockSize, null);
     Assert.assertEquals(status, namesystem.startFile(src, perm, 
         "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE), 
-        true, (short) 1, BlockSize));
+        true, (short) 1, BlockSize, null));
     Assert.assertEquals(status, namesystem.startFile(src, perm, 
         "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE), 
-        true, (short) 1, BlockSize));
+        true, (short) 1, BlockSize, null));
     
     // A non-retried call fails
     newCall();
     try {
       namesystem.startFile(src, perm, "holder", "clientmachine",
-          EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize);
+          EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize, null);
       Assert.fail("testCreate - expected exception is not thrown");
     } catch (IOException e) {
       // expected
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
index daa3aaa..899b888 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
@@ -395,7 +395,7 @@
       this.status = client.getNamenode().create(fileName,
           FsPermission.getFileDefault(), client.getClientName(),
           new EnumSetWritable<CreateFlag>(createFlag), false, DataNodes,
-          BlockSize);
+          BlockSize, null);
     }
 
     @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index db27554..b8150f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -64,7 +64,7 @@
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
         now, now + 10, new FsPermission((short) 0644), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
-        INodeId.GRANDFATHER_INODE_ID, 0);
+        INodeId.GRANDFATHER_INODE_ID, 0, null);
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status  = " + status);
     System.out.println("fstatus = " + fstatus);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
new file mode 100644
index 0000000..ebbf773
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
@@ -0,0 +1,284 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="testConf.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+  <!-- Normal mode is test. To run just the commands and dump the output
+       to the log, set it to nocompare -->
+  <mode>test</mode>
+
+  <!--  Comparator types:
+           ExactComparator
+           SubstringComparator
+           RegexpComparator
+           TokenComparator
+           -->
+  <tests>
+
+    <test>
+      <description>Test basic usage</description>
+      <test-commands>
+        <crypto-admin-command></crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Usage: bin/hdfs crypto [COMMAND]</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test failure of creating an EZ when the directory doesn't exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -ls /test</command>
+        <crypto-admin-command>-createZone -path /test -keyName myKey</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>cannot find /test</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test failure of creating an EZ on a directory that is already an EZ</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /foo</command>
+        <command>-fs NAMENODE -ls /</command>
+        <crypto-admin-command>-createZone -path /foo -keyName myKey</crypto-admin-command>
+        <crypto-admin-command>-createZone -path /foo -keyName myKey</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /foo</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Directory /foo is already in an encryption zone</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test failure of creating an EZ inside an existing EZ.</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /foo</command>
+        <command>-fs NAMENODE -ls /</command>
+        <crypto-admin-command>-createZone -keyName myKey -path /foo</crypto-admin-command>
+        <command>-fs NAMENODE -mkdir /foo/bar</command>
+        <crypto-admin-command>-createZone -keyName myKey -path /foo/bar</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /foo/bar</command>
+        <command>-fs NAMENODE -rmdir /foo</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Directory /foo/bar is already in an encryption zone. (/foo)</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test failure of creating an EZ using a non-empty directory.</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /foo</command>
+        <command>-fs NAMENODE -touchz /foo/bar</command>
+        <command>-fs NAMENODE -ls /</command>
+        <crypto-admin-command>-createZone -keyName myKey -path /foo</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /foo/bar</command>
+        <command>-fs NAMENODE -rmdir /foo</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Attempt to create an encryption zone for a non-empty directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test failure of creating an EZ passing a key that doesn't exist.</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /foo</command>
+        <command>-fs NAMENODE -ls /</command>
+        <crypto-admin-command>-createZone -path /foo -keyName doesntexist</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /foo</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Key doesntexist doesn't exist.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test failure of creating an EZ when no path is specified.</description>
+      <test-commands>
+        <crypto-admin-command>-createZone -keyName blahKey</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>You must specify a path</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test failure of creating an EZ when no key is specified.</description>
+      <test-commands>
+        <crypto-admin-command>-createZone -path /foo</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>You must specify a key name</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test success of creating an encryption zone a few levels down.</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /foo</command>
+        <command>-fs NAMENODE -mkdir /foo/bar</command>
+        <command>-fs NAMENODE -mkdir /foo/bar/baz</command>
+        <command>-fs NAMENODE -ls /</command>
+        <crypto-admin-command>-createZone -path /foo/bar/baz -keyName myKey</crypto-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /foo/bar/baz</command>
+        <command>-fs NAMENODE -rmdir /foo/bar</command>
+        <command>-fs NAMENODE -rmdir /foo/</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Added encryption zone /foo/bar/baz</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test failure of renaming a file across encryption zones</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /src</command>
+        <command>-fs NAMENODE -mkdir /dst</command>
+        <command>-fs NAMENODE -ls /</command>
+        <crypto-admin-command>-createZone -path /src -keyName myKey</crypto-admin-command>
+        <crypto-admin-command>-createZone -path /dst -keyName myKey</crypto-admin-command>
+        <command>-fs NAMENODE -mkdir /src/subdir</command>
+        <command>-fs NAMENODE -mv /src/subdir /dst</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /src/subdir</command>
+        <command>-fs NAMENODE -rmdir /src</command>
+        <command>-fs NAMENODE -rmdir /dst</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>/src/subdir can't be moved from encryption zone /src to encryption zone /dst.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test failure of renaming a non-EZ file into an EZ</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /src</command>
+        <command>-fs NAMENODE -mkdir /dst</command>
+        <command>-fs NAMENODE -ls /</command>
+        <crypto-admin-command>-createZone -path /dst -keyName myKey</crypto-admin-command>
+        <command>-fs NAMENODE -mv /src /dst</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /src</command>
+        <command>-fs NAMENODE -rmdir /dst</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>/src can't be moved into an encryption zone</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test failure of renaming a file from an EZ to a non-EZ destination</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /src</command>
+        <command>-fs NAMENODE -mkdir /dst</command>
+        <command>-fs NAMENODE -ls /</command>
+        <crypto-admin-command>-createZone -path /src -keyName myKey</crypto-admin-command>
+        <command>-fs NAMENODE -mv /src /dst</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /src</command>
+        <command>-fs NAMENODE -rmdir /dst</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>/src can't be moved from an encryption zone</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>Test success of renaming a file within an EZ</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /src</command>
+        <crypto-admin-command>-createZone -path /src -keyName myKey</crypto-admin-command>
+        <command>-fs NAMENODE -mkdir /src/subdir1</command>
+        <command>-fs NAMENODE -mkdir /src/subdir2</command>
+        <command>-fs NAMENODE -mv /src/subdir1 /src/subdir2</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /src/subdir2/subdir1</command>
+        <command>-fs NAMENODE -rmdir /src/subdir2</command>
+        <command>-fs NAMENODE -rmdir /src</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+  </tests>
+</configuration>
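
[Editor's note] Taken together, the cases above walk the basic "hdfs crypto" admin flow. Outside the test harness, and assuming a key named myKey already exists in the configured key provider, the equivalent shell session is roughly:

    hdfs dfs -mkdir /zone
    hdfs crypto -createZone -path /zone -keyName myKey   # "Added encryption zone /zone"
    hdfs dfs -mkdir /zone/a /zone/b /plain
    hdfs dfs -mv /zone/a /zone/b       # allowed: the rename stays inside the zone
    hdfs dfs -mv /zone/b /plain        # rejected: can't be moved from an encryption zone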
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
index 7b7f866..3414f57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
@@ -64,7 +64,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>name must be prefixed with user/trusted/security/system, followed by a '.'</expected-output>
+          <expected-output>name must be prefixed with user/trusted/security/system/raw, followed by a '.'</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -126,6 +126,42 @@
     </test>
     
     <test>
+      <description>setfattr : Fail to add a raw-namespace xattr without the /.reserved/raw prefix</description>
+      <test-commands>
+          <command>-fs NAMENODE -touchz /file1</command>
+          <command>-fs NAMENODE -setfattr -n raw.a1 -v 123456 /file1</command>
+      </test-commands>
+      <cleanup-commands>
+          <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+          <comparator>
+              <type>SubstringComparator</type>
+              <expected-output>setfattr: User doesn't have permission for xattr: raw.a1</expected-output>
+          </comparator>
+      </comparators>
+
+    </test>
+
+    <test>
+        <description>setfattr : Add a raw-namespace xattr via /.reserved/raw</description>
+        <test-commands>
+            <command>-fs NAMENODE -touchz /file1</command>
+            <command>-fs NAMENODE -setfattr -n raw.a1 -v 123456 /.reserved/raw/file1</command>
+            <command>-fs NAMENODE -getfattr -n raw.a1 /.reserved/raw/file1</command>
+        </test-commands>
+        <cleanup-commands>
+            <command>-fs NAMENODE -rm /file1</command>
+        </cleanup-commands>
+        <comparators>
+            <comparator>
+                <type>SubstringComparator</type>
+                <expected-output>raw.a1="123456"</expected-output>
+            </comparator>
+        </comparators>
+    </test>
+
+    <test>
       <description>setfattr : Add an xattr, and encode is text</description>
       <test-commands>
         <command>-fs NAMENODE -touchz /file1</command>
@@ -256,6 +292,26 @@
         </comparator>
       </comparators>
     </test>
+
+    <test>
+        <description>setfattr : Remove a raw-namespace xattr via /.reserved/raw</description>
+        <test-commands>
+            <command>-fs NAMENODE -touchz /file1</command>
+            <command>-fs NAMENODE -setfattr -n raw.a1 -v 123456 /.reserved/raw/file1</command>
+            <command>-fs NAMENODE -setfattr -n raw.a2 -v 123456 /.reserved/raw/file1</command>
+            <command>-fs NAMENODE -setfattr -x raw.a2 /.reserved/raw/file1</command>
+            <command>-fs NAMENODE -getfattr -d /.reserved/raw/file1</command>
+        </test-commands>
+        <cleanup-commands>
+            <command>-fs NAMENODE -rm /file1</command>
+        </cleanup-commands>
+        <comparators>
+            <comparator>
+                <type>SubstringComparator</type>
+                <expected-output># file: /.reserved/raw/file1#LF#raw.a1="123456"#LF#</expected-output>
+            </comparator>
+        </comparators>
+    </test>
     
     <test>
       <description>getfattr : Get an xattr</description>
diff --git a/hadoop-mapreduce-project/CHANGES-fs-encryption.txt b/hadoop-mapreduce-project/CHANGES-fs-encryption.txt
new file mode 100644
index 0000000..3e1718e
--- /dev/null
+++ b/hadoop-mapreduce-project/CHANGES-fs-encryption.txt
@@ -0,0 +1,20 @@
+Hadoop MapReduce Change Log
+
+fs-encryption (Unreleased)
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+    MAPREDUCE-5890. Support for encrypting Intermediate 
+    data and spills in local filesystem. (asuresh via tucu)
+
+  IMPROVEMENTS
+
+    MAPREDUCE-6007. Add support to distcp to preserve raw.* namespace
+    extended attributes. (clamb)
+
+    HDFS-6872. Fix TestOptionsParser. (clamb)
+
+  BUG FIXES
+
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
index cfcf0f2..be7fe18 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
@@ -43,6 +44,7 @@
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.CryptoUtils;
 
 /**
  * <code>BackupStore</code> is an utility class that is used to support
@@ -572,7 +574,9 @@
 
       file = lDirAlloc.getLocalPathForWrite(tmp.toUri().getPath(), 
           -1, conf);
-      return new Writer<K, V>(conf, fs, file);
+      FSDataOutputStream out = fs.create(file);
+      out = CryptoUtils.wrapIfNecessary(conf, out);
+      return new Writer<K, V>(conf, out, null, null, null, null, true);
     }
   }
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
index a410c97..30ebd6b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
@@ -90,13 +90,11 @@
     
     DataOutputBuffer buffer = new DataOutputBuffer();
 
-    public Writer(Configuration conf, FileSystem fs, Path file, 
-                  Class<K> keyClass, Class<V> valueClass,
-                  CompressionCodec codec,
-                  Counters.Counter writesCounter) throws IOException {
-      this(conf, fs.create(file), keyClass, valueClass, codec,
-           writesCounter);
-      ownOutputStream = true;
+    public Writer(Configuration conf, FSDataOutputStream out,
+        Class<K> keyClass, Class<V> valueClass,
+        CompressionCodec codec, Counters.Counter writesCounter)
+        throws IOException {
+      this(conf, out, keyClass, valueClass, codec, writesCounter, false);
     }
     
     protected Writer(Counters.Counter writesCounter) {
@@ -105,7 +103,8 @@
 
     public Writer(Configuration conf, FSDataOutputStream out, 
         Class<K> keyClass, Class<V> valueClass,
-        CompressionCodec codec, Counters.Counter writesCounter)
+        CompressionCodec codec, Counters.Counter writesCounter,
+        boolean ownOutputStream)
         throws IOException {
       this.writtenRecordsCounter = writesCounter;
       this.checksumOut = new IFileOutputStream(out);
@@ -137,11 +136,7 @@
         this.valueSerializer = serializationFactory.getSerializer(valueClass);
         this.valueSerializer.open(buffer);
       }
-    }
-
-    public Writer(Configuration conf, FileSystem fs, Path file) 
-    throws IOException {
-      this(conf, fs, file, null, null, null, null);
+      this.ownOutputStream = ownOutputStream;
     }
 
     public void close() throws IOException {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
index 84fdd92..b533ebe 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
@@ -66,6 +66,7 @@
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter;
 import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitIndex;
 import org.apache.hadoop.mapreduce.task.MapContextImpl;
+import org.apache.hadoop.mapreduce.CryptoUtils;
 import org.apache.hadoop.util.IndexedSortable;
 import org.apache.hadoop.util.IndexedSorter;
 import org.apache.hadoop.util.Progress;
@@ -1580,7 +1581,8 @@
           IFile.Writer<K, V> writer = null;
           try {
             long segmentStart = out.getPos();
-            writer = new Writer<K, V>(job, out, keyClass, valClass, codec,
+            FSDataOutputStream partitionOut = CryptoUtils.wrapIfNecessary(job, out);
+            writer = new Writer<K, V>(job, partitionOut, keyClass, valClass, codec,
                                       spilledRecordsCounter);
             if (combinerRunner == null) {
               // spill directly
@@ -1617,8 +1619,8 @@
 
             // record offsets
             rec.startOffset = segmentStart;
-            rec.rawLength = writer.getRawLength();
-            rec.partLength = writer.getCompressedLength();
+            rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job);
+            rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job);
             spillRec.putIndex(rec, i);
 
             writer = null;
@@ -1668,7 +1670,8 @@
           try {
             long segmentStart = out.getPos();
             // Create a new codec, don't care!
-            writer = new IFile.Writer<K,V>(job, out, keyClass, valClass, codec,
+            FSDataOutputStream partitionOut = CryptoUtils.wrapIfNecessary(job, out);
+            writer = new IFile.Writer<K,V>(job, partitionOut, keyClass, valClass, codec,
                                             spilledRecordsCounter);
 
             if (i == partition) {
@@ -1682,8 +1685,8 @@
 
             // record offsets
             rec.startOffset = segmentStart;
-            rec.rawLength = writer.getRawLength();
-            rec.partLength = writer.getCompressedLength();
+            rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job);
+            rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job);
             spillRec.putIndex(rec, i);
 
             writer = null;
@@ -1825,12 +1828,13 @@
         try {
           for (int i = 0; i < partitions; i++) {
             long segmentStart = finalOut.getPos();
+            FSDataOutputStream finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut);
             Writer<K, V> writer =
-              new Writer<K, V>(job, finalOut, keyClass, valClass, codec, null);
+              new Writer<K, V>(job, finalPartitionOut, keyClass, valClass, codec, null);
             writer.close();
             rec.startOffset = segmentStart;
-            rec.rawLength = writer.getRawLength();
-            rec.partLength = writer.getCompressedLength();
+            rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job);
+            rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job);
             sr.putIndex(rec, i);
           }
           sr.writeToFile(finalIndexFile, job);
@@ -1879,8 +1883,9 @@
 
           //write merged output to disk
           long segmentStart = finalOut.getPos();
+          FSDataOutputStream finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut);
           Writer<K, V> writer =
-              new Writer<K, V>(job, finalOut, keyClass, valClass, codec,
+              new Writer<K, V>(job, finalPartitionOut, keyClass, valClass, codec,
                                spilledRecordsCounter);
           if (combinerRunner == null || numSpills < minSpillsForCombine) {
             Merger.writeFile(kvIter, writer, reporter, job);
@@ -1896,8 +1901,8 @@
           
           // record offsets
           rec.startOffset = segmentStart;
-          rec.rawLength = writer.getRawLength();
-          rec.partLength = writer.getCompressedLength();
+          rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job);
+          rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job);
           spillRec.putIndex(rec, parts);
         }
         spillRec.writeToFile(finalIndexFile, job);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java
index 9493871..92855169 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.ChecksumFileSystem;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
@@ -40,6 +41,7 @@
 import org.apache.hadoop.mapred.IFile.Writer;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.CryptoUtils;
 import org.apache.hadoop.util.PriorityQueue;
 import org.apache.hadoop.util.Progress;
 import org.apache.hadoop.util.Progressable;
@@ -298,8 +300,12 @@
     void init(Counters.Counter readsCounter) throws IOException {
       if (reader == null) {
         FSDataInputStream in = fs.open(file);
+
         in.seek(segmentOffset);
-        reader = new Reader<K, V>(conf, in, segmentLength, codec, readsCounter);
+        in = CryptoUtils.wrapIfNecessary(conf, in);
+        reader = new Reader<K, V>(conf, in,
+            segmentLength - CryptoUtils.cryptoPadding(conf),
+            codec, readsCounter);
       }
       
       if (mapOutputsCounter != null) {
@@ -714,9 +720,10 @@
                                               tmpFilename.toString(),
                                               approxOutputSize, conf);
 
-          Writer<K, V> writer = 
-            new Writer<K, V>(conf, fs, outputFile, keyClass, valueClass, codec,
-                             writesCounter);
+          FSDataOutputStream out = fs.create(outputFile);
+          out = CryptoUtils.wrapIfNecessary(conf, out);
+          Writer<K, V> writer = new Writer<K, V>(conf, out, keyClass, valueClass,
+              codec, writesCounter, true);
           writeFile(this, writer, reporter, conf);
           writer.close();
           
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java
new file mode 100644
index 0000000..7d8a496
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/CryptoUtils.java
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoInputStream;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.crypto.CryptoFSDataInputStream;
+import org.apache.hadoop.fs.crypto.CryptoFSDataOutputStream;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.google.common.io.LimitInputStream;
+
+/**
+ * This class provides utilities to make it easier to work with Cryptographic
+ * Streams, specifically for encrypting intermediate data such as MapReduce
+ * spill files.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class CryptoUtils {
+
+  private static final Log LOG = LogFactory.getLog(CryptoUtils.class);
+
+  public static boolean isShuffleEncrypted(Configuration conf) {
+    return conf.getBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA,
+        MRJobConfig.DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA);
+  }
+
+  /**
+   * Creates and initializes an IV (Initialization Vector) whose length matches
+   * the block size of the configured cipher suite.
+   *
+   * @param conf the job configuration
+   * @return the IV bytes, or null if encryption of intermediate data is disabled
+   * @throws IOException on error
+   */
+  public static byte[] createIV(Configuration conf) throws IOException {
+    CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
+    if (isShuffleEncrypted(conf)) {
+      byte[] iv = new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
+      cryptoCodec.generateSecureRandom(iv);
+      return iv;
+    } else {
+      return null;
+    }
+  }
+
+  public static int cryptoPadding(Configuration conf) {
+    // Sizeof(IV) + long(start-offset)
+    return isShuffleEncrypted(conf) ? CryptoCodec.getInstance(conf)
+        .getCipherSuite().getAlgorithmBlockSize() + 8 : 0;
+  }
+
+  private static byte[] getEncryptionKey() throws IOException {
+    return TokenCache.getShuffleSecretKey(UserGroupInformation.getCurrentUser()
+        .getCredentials());
+  }
+
+  private static int getBufferSize(Configuration conf) {
+    return conf.getInt(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB,
+        MRJobConfig.DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB) * 1024;
+  }
+
+  /**
+   * Wraps a given FSDataOutputStream with a CryptoOutputStream. The size of the
+   * data buffer required for the stream is specified by the
+   * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
+   * variable.
+   *
+   * When encryption is enabled, the current stream position and a freshly
+   * generated IV are written to the underlying stream before it is wrapped.
+   *
+   * @param conf the job configuration
+   * @param out the stream to wrap
+   * @return the wrapped stream, or the original stream if encryption of
+   *         intermediate data is disabled
+   * @throws IOException on error
+   */
+  public static FSDataOutputStream wrapIfNecessary(Configuration conf,
+      FSDataOutputStream out) throws IOException {
+    if (isShuffleEncrypted(conf)) {
+      out.write(ByteBuffer.allocate(8).putLong(out.getPos()).array());
+      byte[] iv = createIV(conf);
+      out.write(iv);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("IV written to Stream ["
+            + Base64.encodeBase64URLSafeString(iv) + "]");
+      }
+      return new CryptoFSDataOutputStream(out, CryptoCodec.getInstance(conf),
+          getBufferSize(conf), getEncryptionKey(), iv);
+    } else {
+      return out;
+    }
+  }
+
+  /**
+   * Wraps a given InputStream with a CryptoInputStream. The size of the data
+   * buffer required for the stream is specified by the
+   * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
+   * variable.
+   * 
+   * If the value of 'length' is > -1, the InputStream is additionally wrapped
+   * in a LimitInputStream. Crypto streams are late-buffering: they will always
+   * try to read ahead if they can. The LimitInputStream ensures that the
+   * crypto stream does not read past the provided length of the underlying
+   * stream.
+   *
+   * @param conf the job configuration
+   * @param in the stream to wrap
+   * @param length the number of bytes (including crypto padding) that may be
+   *               read from the underlying stream, or -1 for no limit
+   * @return the wrapped stream, or the original stream if encryption of
+   *         intermediate data is disabled
+   * @throws IOException on error
+   */
+  public static InputStream wrapIfNecessary(Configuration conf, InputStream in,
+      long length) throws IOException {
+    if (isShuffleEncrypted(conf)) {
+      int bufferSize = getBufferSize(conf);
+      if (length > -1) {
+        in = new LimitInputStream(in, length);
+      }
+      byte[] offsetArray = new byte[8];
+      IOUtils.readFully(in, offsetArray, 0, 8);
+      long offset = ByteBuffer.wrap(offsetArray).getLong();
+      CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
+      byte[] iv = 
+          new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
+      IOUtils.readFully(in, iv, 0, 
+          cryptoCodec.getCipherSuite().getAlgorithmBlockSize());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("IV read from ["
+            + Base64.encodeBase64URLSafeString(iv) + "]");
+      }
+      return new CryptoInputStream(in, cryptoCodec, bufferSize,
+          getEncryptionKey(), iv, offset + cryptoPadding(conf));
+    } else {
+      return in;
+    }
+  }
+
+  /**
+   * Wraps a given FSDataInputStream with a CryptoInputStream. The size of the
+   * data buffer required for the stream is specified by the
+   * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
+   * variable.
+   *
+   * @param conf the job configuration
+   * @param in the stream to wrap
+   * @return the wrapped stream, or the original stream if encryption of
+   *         intermediate data is disabled
+   * @throws IOException on error
+   */
+  public static FSDataInputStream wrapIfNecessary(Configuration conf,
+      FSDataInputStream in) throws IOException {
+    if (isShuffleEncrypted(conf)) {
+      CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);
+      int bufferSize = getBufferSize(conf);
+      // The 8-byte start offset is not used here, but it must still be
+      // consumed because the output stream always writes it before the IV.
+      IOUtils.readFully(in, new byte[8], 0, 8);
+      byte[] iv = 
+          new byte[cryptoCodec.getCipherSuite().getAlgorithmBlockSize()];
+      IOUtils.readFully(in, iv, 0, 
+          cryptoCodec.getCipherSuite().getAlgorithmBlockSize());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("IV read from Stream ["
+            + Base64.encodeBase64URLSafeString(iv) + "]");
+      }
+      return new CryptoFSDataInputStream(in, cryptoCodec, bufferSize,
+          getEncryptionKey(), iv);
+    } else {
+      return in;
+    }
+  }
+
+}
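
[Editor's sketch, not part of the patch] The helpers above are meant to be symmetric: wrapIfNecessary(conf, out) prefixes the stream with the 8-byte start offset and the IV and encrypts what follows, while the read-side wrappers consume that prefix again, which is why callers such as MapTask and Merger adjust lengths by cryptoPadding(conf). A self-contained sketch against the local filesystem, with the shuffle key installed by hand (in a real job JobSubmitter does this) and an illustrative file name:

    import java.io.InputStream;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.CryptoUtils;
    import org.apache.hadoop.mapreduce.MRJobConfig;
    import org.apache.hadoop.mapreduce.security.TokenCache;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.UserGroupInformation;

    public class CryptoUtilsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);

        // Stand-in for what JobSubmitter normally does: place a shuffle secret
        // key in the current user's credentials so getEncryptionKey() works.
        Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
        TokenCache.setShuffleSecretKey(new byte[16], creds);
        UserGroupInformation.getCurrentUser().addCredentials(creds);

        FileSystem localFs = FileSystem.getLocal(conf);
        Path spill = new Path("/tmp/spill.out");        // illustrative path

        // Write side: the wrapper records the current offset and an IV, then
        // encrypts everything written afterwards.
        FSDataOutputStream out =
            CryptoUtils.wrapIfNecessary(conf, localFs.create(spill));
        out.write(new byte[] {1, 2, 3});
        out.close();

        // Read side: the on-disk length includes cryptoPadding(conf) bytes of
        // offset + IV, so the decrypted payload is that much shorter.
        long onDiskLen = localFs.getFileStatus(spill).getLen();
        long payloadLen = onDiskLen - CryptoUtils.cryptoPadding(conf);

        InputStream in =
            CryptoUtils.wrapIfNecessary(conf, localFs.open(spill), onDiskLen);
        byte[] buf = new byte[(int) payloadLen];
        in.read(buf);
        in.close();
      }
    }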
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
index 94e7125..0734e7f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
@@ -291,7 +291,7 @@
   /**
    * configure the jobconf of the user with the command line options of 
    * -libjars, -files, -archives.
-   * @param conf
+   * @param job
    * @throws IOException
    */
   private void copyAndConfigureFiles(Job job, Path jobSubmitDir) 
@@ -376,8 +376,13 @@
       if (TokenCache.getShuffleSecretKey(job.getCredentials()) == null) {
         KeyGenerator keyGen;
         try {
+         
+          int keyLen = CryptoUtils.isShuffleEncrypted(conf) 
+              ? conf.getInt(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS, 
+                  MRJobConfig.DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS)
+              : SHUFFLE_KEY_LENGTH;
           keyGen = KeyGenerator.getInstance(SHUFFLE_KEYGEN_ALGORITHM);
-          keyGen.init(SHUFFLE_KEY_LENGTH);
+          keyGen.init(keyLen);
         } catch (NoSuchAlgorithmException e) {
           throw new IOException("Error generating shuffle secret key", e);
         }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index aef84c0..4c48cf5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -771,4 +771,18 @@
   
   public static final String TASK_PREEMPTION =
       "mapreduce.job.preemption";
+
+  public static final String MR_ENCRYPTED_INTERMEDIATE_DATA =
+      "mapreduce.job.encrypted-intermediate-data";
+  public static final boolean DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA = false;
+
+  public static final String MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS =
+      "mapreduce.job.encrypted-intermediate-data-key-size-bits";
+  public static final int DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS =
+      128;
+
+  public static final String MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB =
+      "mapreduce.job.encrypted-intermediate-data.buffer.kb";
+  public static final int DEFAULT_MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB =
+          128;
 }
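The three keys added above control whether intermediate data is encrypted, the size of the generated shuffle key, and the crypto stream buffer size. A minimal sketch of turning the feature on for a job (the 256-bit value is purely illustrative):

    Configuration conf = new Configuration();
    conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
    // Optional overrides; the defaults are 128 bits and 128 KB respectively.
    conf.setInt(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS, 256);
    conf.setInt(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA_BUFFER_KB, 128);
    Job job = Job.getInstance(conf, "example");

With the flag set, JobSubmitter (above) sizes the shuffle secret key from MR_ENCRYPTED_INTERMEDIATE_DATA_KEY_SIZE_BITS instead of the fixed SHUFFLE_KEY_LENGTH.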
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
index 94966b9..e1e1663 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
@@ -19,6 +19,7 @@
 
 import java.io.DataInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.net.ConnectException;
 import java.net.HttpURLConnection;
 import java.net.MalformedURLException;
@@ -43,6 +44,7 @@
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
+import org.apache.hadoop.mapreduce.CryptoUtils;
 import org.apache.hadoop.security.ssl.SSLFactory;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -65,6 +67,7 @@
                                     CONNECTION, WRONG_REDUCE}
   
   private final static String SHUFFLE_ERR_GRP_NAME = "Shuffle Errors";
+  private final JobConf jobConf;
   private final Counters.Counter connectionErrs;
   private final Counters.Counter ioErrs;
   private final Counters.Counter wrongLengthErrs;
@@ -104,6 +107,7 @@
                  Reporter reporter, ShuffleClientMetrics metrics,
                  ExceptionReporter exceptionReporter, SecretKey shuffleKey,
                  int id) {
+    this.jobConf = job;
     this.reporter = reporter;
     this.scheduler = scheduler;
     this.merger = merger;
@@ -396,7 +400,11 @@
         return remaining.toArray(new TaskAttemptID[remaining.size()]);
       }
 
- 
+      InputStream is = input;
+      is = CryptoUtils.wrapIfNecessary(jobConf, is, compressedLength);
+      compressedLength -= CryptoUtils.cryptoPadding(jobConf);
+      decompressedLength -= CryptoUtils.cryptoPadding(jobConf);
+      
       // Do some basic sanity verification
       if (!verifySanity(compressedLength, decompressedLength, forReduce,
           remaining, mapId)) {
@@ -433,7 +441,7 @@
         LOG.info("fetcher#" + id + " about to shuffle output of map "
             + mapOutput.getMapId() + " decomp: " + decompressedLength
             + " len: " + compressedLength + " to " + mapOutput.getDescription());
-        mapOutput.shuffle(host, input, compressedLength, decompressedLength,
+        mapOutput.shuffle(host, is, compressedLength, decompressedLength,
             metrics, reporter);
       } catch (java.lang.InternalError e) {
         LOG.warn("Failed to shuffle for fetcher#"+id, e);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java
index 5279652..98256c2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/LocalFetcher.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.SpillRecord;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.CryptoUtils;
 
 /**
  * LocalFetcher is used by LocalJobRunner to perform a local filesystem
@@ -145,6 +146,9 @@
     // now read the file, seek to the appropriate section, and send it.
     FileSystem localFs = FileSystem.getLocal(job).getRaw();
     FSDataInputStream inStream = localFs.open(mapOutputFileName);
+
+    inStream = CryptoUtils.wrapIfNecessary(job, inStream);
+
     try {
       inStream.seek(ir.startOffset);
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
index 49aa857..a4b1aa8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.ChecksumFileSystem;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.LocalFileSystem;
@@ -54,6 +55,7 @@
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.TaskID;
+import org.apache.hadoop.mapreduce.CryptoUtils;
 import org.apache.hadoop.mapreduce.task.reduce.MapOutput.MapOutputComparator;
 import org.apache.hadoop.util.Progress;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -228,6 +230,10 @@
     return new InMemoryMerger(this);
   }
 
+  protected MergeThread<CompressAwarePath,K,V> createOnDiskMerger() {
+    return new OnDiskMerger(this);
+  }
+
   TaskAttemptID getReduceId() {
     return reduceId;
   }
@@ -453,11 +459,10 @@
                                            mergeOutputSize).suffix(
                                                Task.MERGED_OUTPUT_PREFIX);
 
-      Writer<K,V> writer = 
-        new Writer<K,V>(jobConf, rfs, outputPath,
-                        (Class<K>) jobConf.getMapOutputKeyClass(),
-                        (Class<V>) jobConf.getMapOutputValueClass(),
-                        codec, null);
+      FSDataOutputStream out = CryptoUtils.wrapIfNecessary(jobConf, rfs.create(outputPath));
+      Writer<K, V> writer = new Writer<K, V>(jobConf, out,
+          (Class<K>) jobConf.getMapOutputKeyClass(),
+          (Class<V>) jobConf.getMapOutputValueClass(), codec, null, true);
 
       RawKeyValueIterator rIter = null;
       CompressAwarePath compressAwarePath;
@@ -537,11 +542,12 @@
       Path outputPath = 
         localDirAllocator.getLocalPathForWrite(inputs.get(0).toString(), 
             approxOutputSize, jobConf).suffix(Task.MERGED_OUTPUT_PREFIX);
-      Writer<K,V> writer = 
-        new Writer<K,V>(jobConf, rfs, outputPath, 
-                        (Class<K>) jobConf.getMapOutputKeyClass(), 
-                        (Class<V>) jobConf.getMapOutputValueClass(),
-                        codec, null);
+
+      FSDataOutputStream out = CryptoUtils.wrapIfNecessary(jobConf, rfs.create(outputPath));
+      Writer<K, V> writer = new Writer<K, V>(jobConf, out,
+          (Class<K>) jobConf.getMapOutputKeyClass(),
+          (Class<V>) jobConf.getMapOutputValueClass(), codec, null, true);
+
       RawKeyValueIterator iter  = null;
       CompressAwarePath compressAwarePath;
       Path tmpDir = new Path(reduceId.toString());
@@ -717,8 +723,10 @@
             keyClass, valueClass, memDiskSegments, numMemDiskSegments,
             tmpDir, comparator, reporter, spilledRecordsCounter, null, 
             mergePhase);
-        Writer<K,V> writer = new Writer<K,V>(job, fs, outputPath,
-            keyClass, valueClass, codec, null);
+
+        FSDataOutputStream out = CryptoUtils.wrapIfNecessary(job, fs.create(outputPath));
+        Writer<K, V> writer = new Writer<K, V>(job, out, keyClass, valueClass,
+            codec, null, true);
         try {
           Merger.writeFile(rIter, writer, reporter, job);
           writer.close();
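The merge-side writers follow the mirror-image pattern: the raw file stream is wrapped before the IFile.Writer is built, and readers wrap the input the same way (see readOnDiskMapOutput in TestMerger further down). A minimal round-trip sketch using the constructors introduced by this patch:

    // Write an (optionally encrypted) on-disk segment...
    FSDataOutputStream out = CryptoUtils.wrapIfNecessary(jobConf, rfs.create(outputPath));
    IFile.Writer<Text, Text> writer = new IFile.Writer<Text, Text>(jobConf, out,
        Text.class, Text.class, codec, null, true);  // final flag: writer owns the stream (assumption)
    writer.append(new Text("k"), new Text("v"));
    writer.close();

    // ...and read it back through the same wrapping.
    FSDataInputStream in = CryptoUtils.wrapIfNecessary(jobConf, rfs.open(outputPath));
    IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(jobConf, in,
        rfs.getFileStatus(outputPath).getLen(), codec, null);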
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java
index 59bb04a..6e0e92b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.mapred.MapOutputFile;
 
 import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.CryptoUtils;
 import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -75,7 +76,7 @@
     this.merger = merger;
     this.outputPath = outputPath;
     tmpOutputPath = getTempPath(outputPath, fetcher);
-    disk = fs.create(tmpOutputPath);
+    disk = CryptoUtils.wrapIfNecessary(conf, fs.create(tmpOutputPath));
   }
 
   @VisibleForTesting
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm
index 41b381a..3e8de4f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm
@@ -191,6 +191,26 @@
 
   If `-update` is used, `1` is overwritten as well.
 
+$H3 raw Namespace Extended Attribute Preservation
+
+  This section only applies to HDFS.
+
+  If the target and all of the source pathnames are in the /.reserved/raw
+  hierarchy, then 'raw' namespace extended attributes will be preserved.
+  'raw' xattrs are used by the system for internal functions such as encryption
+  metadata. They are only visible to users when accessed through the
+  /.reserved/raw hierarchy.
+
+  raw xattrs are preserved based solely on whether /.reserved/raw prefixes are
+  supplied. The -p (preserve, see below) flag does not impact preservation of
+  raw xattrs.
+
+  To prevent raw xattrs from being preserved, simply do not use the
+  /.reserved/raw prefix on any of the source and target paths.
+
+  If the /.reserved/raw prefix is specified on only a subset of the source and
+  target paths, an error is displayed and a non-zero exit code is returned.
+
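For example (host names and paths are illustrative), `hadoop distcp hdfs://nn1:8020/.reserved/raw/src hdfs://nn2:8020/.reserved/raw/dest` copies the data together with its raw.* xattrs whether or not `-p` is given, while `hadoop distcp hdfs://nn1:8020/.reserved/raw/src hdfs://nn2:8020/dest` fails up front (SimpleCopyListing rejects it with an InvalidInputException) because only one side of the copy uses the /.reserved/raw prefix.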
 Command Line Options
 --------------------
 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java
index 1aea500..c5ab420 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMerger.java
@@ -24,14 +24,16 @@
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.security.PrivilegedAction;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.junit.Assert;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -51,10 +53,16 @@
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.apache.hadoop.mapreduce.CryptoUtils;
 import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl;
+import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Progress;
 import org.apache.hadoop.util.Progressable;
 import org.junit.After;
@@ -63,40 +71,48 @@
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
+import com.google.common.collect.Lists;
+
 public class TestMerger {
 
   private Configuration conf;
   private JobConf jobConf;
   private FileSystem fs;
-  
+
   @Before
   public void setup() throws IOException {
     conf = new Configuration();
     jobConf = new JobConf();
     fs = FileSystem.getLocal(conf);
   }
-  
-  @After
-  public void cleanup() throws IOException {    
-    fs.delete(new Path(jobConf.getLocalDirs()[0]), true);
-  }
-  
+
+
   @Test
-  public void testInMemoryMerger() throws Throwable {
+  public void testEncryptedMerger() throws Throwable {
+    jobConf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
+    conf.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
+    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
+    TokenCache.setShuffleSecretKey(new byte[16], credentials);
+    UserGroupInformation.getCurrentUser().addCredentials(credentials);
+    testInMemoryAndOnDiskMerger();
+  }
+
+  @Test
+  public void testInMemoryAndOnDiskMerger() throws Throwable {
     JobID jobId = new JobID("a", 0);
-    TaskAttemptID reduceId = new TaskAttemptID(
+    TaskAttemptID reduceId1 = new TaskAttemptID(
         new TaskID(jobId, TaskType.REDUCE, 0), 0);
     TaskAttemptID mapId1 = new TaskAttemptID(
         new TaskID(jobId, TaskType.MAP, 1), 0);
     TaskAttemptID mapId2 = new TaskAttemptID(
         new TaskID(jobId, TaskType.MAP, 2), 0);
-    
+
     LocalDirAllocator lda = new LocalDirAllocator(MRConfig.LOCAL_DIR);
-    
+
     MergeManagerImpl<Text, Text> mergeManager = new MergeManagerImpl<Text, Text>(
-        reduceId, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null,
+        reduceId1, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null,
         null, null, new Progress(), new MROutputFiles());
-    
+
     // write map outputs
     Map<String, String> map1 = new TreeMap<String, String>();
     map1.put("apple", "disgusting");
@@ -113,32 +129,88 @@
         mapOutputBytes1.length);
     System.arraycopy(mapOutputBytes2, 0, mapOutput2.getMemory(), 0,
         mapOutputBytes2.length);
-    
+
     // create merger and run merge
     MergeThread<InMemoryMapOutput<Text, Text>, Text, Text> inMemoryMerger =
         mergeManager.createInMemoryMerger();
-    List<InMemoryMapOutput<Text, Text>> mapOutputs =
+    List<InMemoryMapOutput<Text, Text>> mapOutputs1 =
         new ArrayList<InMemoryMapOutput<Text, Text>>();
-    mapOutputs.add(mapOutput1);
-    mapOutputs.add(mapOutput2);
-    
-    inMemoryMerger.merge(mapOutputs);
-    
+    mapOutputs1.add(mapOutput1);
+    mapOutputs1.add(mapOutput2);
+
+    inMemoryMerger.merge(mapOutputs1);
+
     Assert.assertEquals(1, mergeManager.onDiskMapOutputs.size());
-    Path outPath = mergeManager.onDiskMapOutputs.iterator().next();
-    
+
+    TaskAttemptID reduceId2 = new TaskAttemptID(
+        new TaskID(jobId, TaskType.REDUCE, 3), 0);
+    TaskAttemptID mapId3 = new TaskAttemptID(
+        new TaskID(jobId, TaskType.MAP, 4), 0);
+    TaskAttemptID mapId4 = new TaskAttemptID(
+        new TaskID(jobId, TaskType.MAP, 5), 0);
+    // write map outputs
+    Map<String, String> map3 = new TreeMap<String, String>();
+    map3.put("apple", "awesome");
+    map3.put("carrot", "amazing");
+    Map<String, String> map4 = new TreeMap<String, String>();
+    map4.put("banana", "bla");
+    byte[] mapOutputBytes3 = writeMapOutput(conf, map3);
+    byte[] mapOutputBytes4 = writeMapOutput(conf, map4);
+    InMemoryMapOutput<Text, Text> mapOutput3 = new InMemoryMapOutput<Text, Text>(
+        conf, mapId3, mergeManager, mapOutputBytes3.length, null, true);
+    InMemoryMapOutput<Text, Text> mapOutput4 = new InMemoryMapOutput<Text, Text>(
+        conf, mapId4, mergeManager, mapOutputBytes4.length, null, true);
+    System.arraycopy(mapOutputBytes3, 0, mapOutput3.getMemory(), 0,
+        mapOutputBytes3.length);
+    System.arraycopy(mapOutputBytes4, 0, mapOutput4.getMemory(), 0,
+        mapOutputBytes4.length);
+
+    // create merger and run merge
+    MergeThread<InMemoryMapOutput<Text, Text>, Text, Text> inMemoryMerger2 =
+        mergeManager.createInMemoryMerger();
+    List<InMemoryMapOutput<Text, Text>> mapOutputs2 =
+        new ArrayList<InMemoryMapOutput<Text, Text>>();
+    mapOutputs2.add(mapOutput3);
+    mapOutputs2.add(mapOutput4);
+
+    inMemoryMerger2.merge(mapOutputs2);
+
+    Assert.assertEquals(2, mergeManager.onDiskMapOutputs.size());
+
+    List<CompressAwarePath> paths = new ArrayList<CompressAwarePath>();
+    Iterator<CompressAwarePath> iterator = mergeManager.onDiskMapOutputs.iterator();
     List<String> keys = new ArrayList<String>();
     List<String> values = new ArrayList<String>();
-    readOnDiskMapOutput(conf, fs, outPath, keys, values);
-    Assert.assertEquals(keys, Arrays.asList("apple", "banana", "carrot"));
-    Assert.assertEquals(values, Arrays.asList("disgusting", "pretty good", "delicious"));
+    while (iterator.hasNext()) {
+      CompressAwarePath next = iterator.next();
+      readOnDiskMapOutput(conf, fs, next, keys, values);
+      paths.add(next);
+    }
+    Assert.assertEquals(keys, Arrays.asList("apple", "banana", "carrot", "apple", "banana", "carrot"));
+    Assert.assertEquals(values, Arrays.asList("awesome", "bla", "amazing", "disgusting", "pretty good", "delicious"));
+    mergeManager.close();
+
+    mergeManager = new MergeManagerImpl<Text, Text>(
+        reduceId2, jobConf, fs, lda, Reporter.NULL, null, null, null, null, null,
+        null, null, new Progress(), new MROutputFiles());
+
+    MergeThread<CompressAwarePath,Text,Text> onDiskMerger = mergeManager.createOnDiskMerger();
+    onDiskMerger.merge(paths);
+
+    Assert.assertEquals(1, mergeManager.onDiskMapOutputs.size());
+
+    keys = new ArrayList<String>();
+    values = new ArrayList<String>();
+    readOnDiskMapOutput(conf, fs, mergeManager.onDiskMapOutputs.iterator().next(), keys, values);
+    Assert.assertEquals(keys, Arrays.asList("apple", "apple", "banana", "banana", "carrot", "carrot"));
+    Assert.assertEquals(values, Arrays.asList("awesome", "disgusting", "pretty good", "bla", "amazing", "delicious"));
 
     mergeManager.close();
     Assert.assertEquals(0, mergeManager.inMemoryMapOutputs.size());
     Assert.assertEquals(0, mergeManager.inMemoryMergedMapOutputs.size());
     Assert.assertEquals(0, mergeManager.onDiskMapOutputs.size());
   }
-  
+
   private byte[] writeMapOutput(Configuration conf, Map<String, String> keysToValues)
       throws IOException {
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
@@ -152,11 +224,13 @@
     writer.close();
     return baos.toByteArray();
   }
-  
+
   private void readOnDiskMapOutput(Configuration conf, FileSystem fs, Path path,
       List<String> keys, List<String> values) throws IOException {
-    IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(conf, fs,
-        path, null, null);
+    FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, fs.open(path));
+
+    IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(conf, in,
+        fs.getFileStatus(path).getLen(), null, null);
     DataInputBuffer keyBuff = new DataInputBuffer();
     DataInputBuffer valueBuff = new DataInputBuffer();
     Text key = new Text();
@@ -169,17 +243,17 @@
       values.add(value.toString());
     }
   }
-  
+
   @Test
   public void testCompressed() throws IOException {
     testMergeShouldReturnProperProgress(getCompressedSegments());
-  }
-  
+  }
+
   @Test
   public void testUncompressed() throws IOException {
     testMergeShouldReturnProperProgress(getUncompressedSegments());
   }
-  
+
   @SuppressWarnings( { "deprecation", "unchecked" })
   public void testMergeShouldReturnProperProgress(
       List<Segment<Text, Text>> segments) throws IOException {
@@ -212,7 +286,7 @@
     }
     return segments;
   }
-  
+
   private List<Segment<Text, Text>> getCompressedSegments() throws IOException {
     List<Segment<Text, Text>> segments = new ArrayList<Segment<Text, Text>>();
     for (int i = 1; i < 1; i++) {
@@ -220,7 +294,7 @@
     }
     return segments;
   }
-  
+
   private Segment<Text, Text> getUncompressedSegment(int i) throws IOException {
     return new Segment<Text, Text>(getReader(i), false);
   }
@@ -258,7 +332,7 @@
       }
     };
   }
-  
+
   private Answer<?> getValueAnswer(final String segmentName) {
     return new Answer<Void>() {
       int i = 0;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIFile.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIFile.java
index e3c7253..a314fc1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIFile.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestIFile.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.mapred;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
@@ -42,7 +44,7 @@
     DefaultCodec codec = new GzipCodec();
     codec.setConf(conf);
     IFile.Writer<Text, Text> writer =
-      new IFile.Writer<Text, Text>(conf, rfs, path, Text.class, Text.class,
+      new IFile.Writer<Text, Text>(conf, rfs.create(path), Text.class, Text.class,
                                    codec, null);
     writer.close();
   }
@@ -56,12 +58,15 @@
     Path path = new Path(new Path("build/test.ifile"), "data");
     DefaultCodec codec = new GzipCodec();
     codec.setConf(conf);
+    FSDataOutputStream out = rfs.create(path);
     IFile.Writer<Text, Text> writer =
-        new IFile.Writer<Text, Text>(conf, rfs, path, Text.class, Text.class,
+        new IFile.Writer<Text, Text>(conf, out, Text.class, Text.class,
                                      codec, null);
     writer.close();
+    FSDataInputStream in = rfs.open(path);
     IFile.Reader<Text, Text> reader =
-      new IFile.Reader<Text, Text>(conf, rfs, path, codec, null);
+      new IFile.Reader<Text, Text>(conf, in, rfs.getFileStatus(path).getLen(),
+          codec, null);
     reader.close();
     
     // test check sum 
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java
new file mode 100644
index 0000000..ebc32ad
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRIntermediateDataEncryption.java
@@ -0,0 +1,254 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+
+import static org.junit.Assert.*;
+
+@SuppressWarnings(value={"unchecked", "deprecation"})
+/**
+ * Tests encryption of intermediate MapReduce data
+ * (MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA).  The input files are already
+ * sorted on the key.  Each test runs a job on a mini DFS/MR cluster with
+ * intermediate-data encryption enabled; a custom partitioner routes sorted
+ * key ranges to different reducers, so the framework's merge on the reduce
+ * side produces final output that is sorted on the key.
+ */
+public class TestMRIntermediateDataEncryption {
+  // Where MR job's input will reside.
+  private static final Path INPUT_DIR = new Path("/test/input");
+  // Where output goes.
+  private static final Path OUTPUT = new Path("/test/output");
+
+  @Test
+  public void testSingleReducer() throws Exception {
+    doEncryptionTest(3, 1, 2);
+  }
+
+  @Test
+  public void testMultipleMapsPerNode() throws Exception {
+    doEncryptionTest(8, 1, 2);
+  }
+
+  @Test
+  public void testMultipleReducers() throws Exception {
+    doEncryptionTest(2, 4, 2);
+  }
+
+  public void doEncryptionTest(int numMappers, int numReducers, int numNodes) throws Exception {
+    doEncryptionTest(numMappers, numReducers, numNodes, 1000);
+  }
+
+  public void doEncryptionTest(int numMappers, int numReducers, int numNodes, int numLines) throws Exception {
+    MiniDFSCluster dfsCluster = null;
+    MiniMRClientCluster mrCluster = null;
+    FileSystem fileSystem = null;
+    try {
+      Configuration conf = new Configuration();
+      // Start the mini-MR and mini-DFS clusters
+
+      dfsCluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(numNodes).build();
+      fileSystem = dfsCluster.getFileSystem();
+      mrCluster = MiniMRClientClusterFactory.create(this.getClass(),
+                                                 numNodes, conf);
+      // Generate input.
+      createInput(fileSystem, numMappers, numLines);
+      // Run the test.
+      runMergeTest(new JobConf(mrCluster.getConfig()), fileSystem, numMappers, numReducers, numLines);
+    } finally {
+      if (dfsCluster != null) {
+        dfsCluster.shutdown();
+      }
+      if (mrCluster != null) {
+        mrCluster.stop();
+      }
+    }
+  }
+
+  private void createInput(FileSystem fs, int numMappers, int numLines) throws Exception {
+    fs.delete(INPUT_DIR, true);
+    for (int i = 0; i < numMappers; i++) {
+      OutputStream os = fs.create(new Path(INPUT_DIR, "input_" + i + ".txt"));
+      Writer writer = new OutputStreamWriter(os);
+      for (int j = 0; j < numLines; j++) {
+        // Create sorted key, value pairs.
+        int k = j + 1;
+        String formattedNumber = String.format("%09d", k);
+        writer.write(formattedNumber + " " + formattedNumber + "\n");
+      }
+      writer.close();
+    }
+  }
+
+  private void runMergeTest(JobConf job, FileSystem fileSystem, int numMappers, int numReducers, int numLines)
+    throws Exception {
+    fileSystem.delete(OUTPUT, true);
+    job.setJobName("Test");
+    JobClient client = new JobClient(job);
+    RunningJob submittedJob = null;
+    FileInputFormat.setInputPaths(job, INPUT_DIR);
+    FileOutputFormat.setOutputPath(job, OUTPUT);
+    job.set("mapreduce.output.textoutputformat.separator", " ");
+    job.setInputFormat(TextInputFormat.class);
+    job.setMapOutputKeyClass(Text.class);
+    job.setMapOutputValueClass(Text.class);
+    job.setOutputKeyClass(Text.class);
+    job.setOutputValueClass(Text.class);
+    job.setMapperClass(MyMapper.class);
+    job.setPartitionerClass(MyPartitioner.class);
+    job.setOutputFormat(TextOutputFormat.class);
+    job.setNumReduceTasks(numReducers);
+
+    job.setInt("mapreduce.map.maxattempts", 1);
+    job.setInt("mapreduce.reduce.maxattempts", 1);
+    job.setInt("mapred.test.num_lines", numLines);
+    job.setBoolean(MRJobConfig.MR_ENCRYPTED_INTERMEDIATE_DATA, true);
+    try {
+      submittedJob = client.submitJob(job);
+      try {
+        if (! client.monitorAndPrintJob(job, submittedJob)) {
+          throw new IOException("Job failed!");
+        }
+      } catch(InterruptedException ie) {
+        Thread.currentThread().interrupt();
+      }
+    } catch(IOException ioe) {
+      System.err.println("Job failed with: " + ioe);
+    } finally {
+      verifyOutput(submittedJob, fileSystem, numMappers, numLines);
+    }
+  }
+
+  private void verifyOutput(RunningJob submittedJob, FileSystem fileSystem, int numMappers, int numLines)
+    throws Exception {
+    FSDataInputStream dis = null;
+    long numValidRecords = 0;
+    long numInvalidRecords = 0;
+    String prevKeyValue = "000000000";
+    Path[] fileList =
+      FileUtil.stat2Paths(fileSystem.listStatus(OUTPUT,
+          new Utils.OutputFileUtils.OutputFilesFilter()));
+    for (Path outFile : fileList) {
+      try {
+        dis = fileSystem.open(outFile);
+        String record;
+        while((record = dis.readLine()) != null) {
+          // Split the line into key and value.
+          int blankPos = record.indexOf(" ");
+          String keyString = record.substring(0, blankPos);
+          String valueString = record.substring(blankPos+1);
+          // Check for sorted output and correctness of record.
+          if (keyString.compareTo(prevKeyValue) >= 0
+              && keyString.equals(valueString)) {
+            prevKeyValue = keyString;
+            numValidRecords++;
+          } else {
+            numInvalidRecords++;
+          }
+        }
+      } finally {
+        if (dis != null) {
+          dis.close();
+          dis = null;
+        }
+      }
+    }
+    // Make sure we got all input records in the output in sorted order.
+    assertEquals((long)(numMappers * numLines), numValidRecords);
+    // Make sure there is no extraneous invalid record.
+    assertEquals(0, numInvalidRecords);
+  }
+
+  /**
+   * A mapper implementation that assumes that key text contains valid integers
+   * in displayable form.
+   */
+  public static class MyMapper extends MapReduceBase
+    implements Mapper<LongWritable, Text, Text, Text> {
+      private Text keyText;
+      private Text valueText;
+
+      public MyMapper() {
+        keyText = new Text();
+        valueText = new Text();
+      }
+
+      @Override
+      public void map(LongWritable key, Text value,
+                      OutputCollector<Text, Text> output,
+                      Reporter reporter) throws IOException {
+        String record = value.toString();
+        int blankPos = record.indexOf(" ");
+        keyText.set(record.substring(0, blankPos));
+        valueText.set(record.substring(blankPos+1));
+        output.collect(keyText, valueText);
+      }
+
+      public void close() throws IOException {
+      }
+    }
+
+  /**
+   * Partitioner implementation to make sure that output is in total sorted
+   * order.  We basically route key ranges to different reducers such that
+   * key values monotonically increase with the partition number.  For example,
+   * in this test, the keys are numbers from 1 to 1000 in the form "000000001"
+   * to "000001000" in each input file.  The keys "000000001" to "000000250" are
+   * routed to partition 0, "000000251" to "000000500" are routed to partition 1
+   * and so on since we have 4 reducers.
+   */
+  static class MyPartitioner implements Partitioner<Text, Text> {
+
+    private JobConf job;
+
+    public MyPartitioner() {
+    }
+
+    public void configure(JobConf job) {
+      this.job = job;
+    }
+
+    public int getPartition(Text key, Text value, int numPartitions) {
+      int keyValue = 0;
+      try {
+        keyValue = Integer.parseInt(key.toString());
+      } catch(NumberFormatException nfe) {
+        keyValue = 0;
+      }
+      int partitionNumber = (numPartitions*(Math.max(0, keyValue-1)))/job.getInt("mapred.test.num_lines", 10000);
+      return partitionNumber;
+    }
+  }
+
+}
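As a concrete check of MyPartitioner's formula: with numPartitions = 4 and mapred.test.num_lines = 1000, key "000000300" yields (4 * 299) / 1000 = 1 and key "000000250" yields (4 * 249) / 1000 = 0, matching the partition ranges described in the class comment.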
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java
index d3a0844..43fd948 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestReduceTask.java
@@ -80,7 +80,7 @@
     FileSystem rfs = ((LocalFileSystem)localFs).getRaw();
     Path path = new Path(tmpDir, "data.in");
     IFile.Writer<Text, Text> writer = 
-      new IFile.Writer<Text, Text>(conf, rfs, path, Text.class, Text.class,
+      new IFile.Writer<Text, Text>(conf, rfs.create(path), Text.class, Text.class,
                                    codec, null);
     for(Pair p: vals) {
       writer.append(new Text(p.key), new Text(p.value));
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
index 69994f3..f447ebc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/pipes/TestPipeApplication.java
@@ -95,9 +95,9 @@
               new Counters.Counter(), new Progress());
       FileSystem fs = new RawLocalFileSystem();
       fs.setConf(conf);
-      Writer<IntWritable, Text> wr = new Writer<IntWritable, Text>(conf, fs,
-              new Path(workSpace + File.separator + "outfile"), IntWritable.class,
-              Text.class, null, null);
+      Writer<IntWritable, Text> wr = new Writer<IntWritable, Text>(conf, fs.create(
+              new Path(workSpace + File.separator + "outfile")), IntWritable.class,
+              Text.class, null, null, true);
       output.setWriter(wr);
       // stub for client
       File fCommand = getFileCommand("org.apache.hadoop.mapred.pipes.PipeApplicationRunnableStub");
@@ -177,9 +177,9 @@
               new Progress());
       FileSystem fs = new RawLocalFileSystem();
       fs.setConf(conf);
-      Writer<IntWritable, Text> wr = new Writer<IntWritable, Text>(conf, fs,
-              new Path(workSpace.getAbsolutePath() + File.separator + "outfile"),
-              IntWritable.class, Text.class, null, null);
+      Writer<IntWritable, Text> wr = new Writer<IntWritable, Text>(conf, fs.create(
+              new Path(workSpace.getAbsolutePath() + File.separator + "outfile")),
+              IntWritable.class, Text.class, null, null, true);
       output.setWriter(wr);
       conf.set(Submitter.PRESERVE_COMMANDFILE, "true");
 
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 7b25508..33f5a04 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -41,6 +41,8 @@
     <hadoop.component>UNDEF</hadoop.component>
     <bundle.snappy>false</bundle.snappy>
     <bundle.snappy.in.bin>false</bundle.snappy.in.bin>
+    <bundle.openssl>false</bundle.openssl>
+    <bundle.openssl.in.bin>false</bundle.openssl.in.bin>
   </properties>
   
   <dependencies>
@@ -351,6 +353,10 @@
                           cd "${snappy.lib}"
                           $$TAR *snappy* | (cd $${TARGET_DIR}/; $$UNTAR)
                         fi
+                        if [ "${bundle.openssl}" = "true" ] ; then
+                          cd "${openssl.lib}"
+                          $$TAR *crypto* | (cd $${TARGET_DIR}/; $$UNTAR)
+                        fi
                       fi
                       BIN_DIR="${BUILD_DIR}/bin"
                       if [ -d $${BIN_DIR} ] ; then
@@ -364,6 +370,12 @@
                             $$TAR *snappy* | (cd $${TARGET_BIN_DIR}/; $$UNTAR)
                           fi
                         fi
+                        if [ "${bundle.openssl.in.bin}" = "true" ] ; then
+                          if [ "${bundle.openssl}" = "true" ] ; then
+                            cd "${openssl.lib}"
+                            $$TAR *crypto* | (cd $${TARGET_BIN_DIR}/; $$UNTAR)
+                          fi
+                        fi
                       fi
                     </echo>
                     <exec executable="sh" dir="${project.build.directory}" failonerror="true">
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c540cc1..be5b3d5 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1048,6 +1048,7 @@
         <!-- attempt to open a file at this path. -->
         <java.security.egd>file:/dev/urandom</java.security.egd>
         <bundle.snappy.in.bin>true</bundle.snappy.in.bin>
+        <bundle.openssl.in.bin>true</bundle.openssl.in.bin>
       </properties>
       <build>
         <plugins>
@@ -1058,6 +1059,7 @@
               <environmentVariables>
                 <!-- Specify where to look for the native DLL on Windows -->
-                <PATH>${env.PATH};${hadoop.common.build.dir}/bin;${snappy.lib}</PATH>
+                <PATH>${env.PATH};${hadoop.common.build.dir}/bin;${snappy.lib};${openssl.lib}</PATH>
               </environmentVariables>
             </configuration>
           </plugin>
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 3930ace..56288ee 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -90,6 +90,7 @@
       <item name="HDFS NFS Gateway" href="hadoop-project-dist/hadoop-hdfs/HdfsNfsGateway.html"/>
       <item name="HDFS Rolling Upgrade" href="hadoop-project-dist/hadoop-hdfs/HdfsRollingUpgrade.html"/>
       <item name="Extended Attributes" href="hadoop-project-dist/hadoop-hdfs/ExtendedAttributes.html"/>
+      <item name="Transparent Encryption" href="hadoop-project-dist/hadoop-hdfs/TransparentEncryption.html"/>
       <item name="HDFS Support for Multihoming" href="hadoop-project-dist/hadoop-hdfs/HdfsMultihoming.html"/>
     </menu>
 
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
index d1dba19..7e71096 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java
@@ -42,6 +42,8 @@
   public static final String CONF_LABEL_LOG_PATH = "distcp.log.path";
   public static final String CONF_LABEL_IGNORE_FAILURES = "distcp.ignore.failures";
   public static final String CONF_LABEL_PRESERVE_STATUS = "distcp.preserve.status";
+  public static final String CONF_LABEL_PRESERVE_RAWXATTRS =
+      "distcp.preserve.rawxattrs";
   public static final String CONF_LABEL_SYNC_FOLDERS = "distcp.sync.folders";
   public static final String CONF_LABEL_DELETE_MISSING = "distcp.delete.missing.source";
   public static final String CONF_LABEL_SSL_CONF = "distcp.keystore.resource";
@@ -128,4 +130,8 @@
   public static final int MIN_RECORDS_PER_CHUNK_DEFAULT = 5;
   public static final int SPLIT_RATIO_DEFAULT  = 2;
 
+  /**
+   * Path of the reserved raw HDFS directory used when copying raw.* xattrs.
+   */
+  static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = "/.reserved/raw";
 }
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
index e77b6e1..c544813 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
@@ -48,7 +48,11 @@
       new Option("p", true, "preserve status (rbugpcax)(replication, " +
           "block-size, user, group, permission, checksum-type, ACL, XATTR).  " +
           "If -p is specified with no <arg>, then preserves replication, " +
-          "block size, user, group, permission and checksum type.")),
+          "block size, user, group, permission and checksum type." +
+          "raw.* xattrs are preserved when both the source and destination " +
+          "paths are in the /.reserved/raw hierarchy (HDFS only). raw.* xattr" +
+          "preservation is independent of the -p flag." +
+          "Refer to the DistCp documentation for more details.")),
 
   /**
    * Update target location by copying only files that are missing
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
index 1ed9ccd..a2a5be3 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
@@ -52,6 +52,8 @@
 
   private EnumSet<FileAttribute> preserveStatus = EnumSet.noneOf(FileAttribute.class);
 
+  private boolean preserveRawXattrs;
+
   private Path atomicWorkPath;
 
   private Path logPath;
@@ -123,6 +125,7 @@
       this.sslConfigurationFile = that.getSslConfigurationFile();
       this.copyStrategy = that.copyStrategy;
       this.preserveStatus = that.preserveStatus;
+      this.preserveRawXattrs = that.preserveRawXattrs;
       this.atomicWorkPath = that.getAtomicWorkPath();
       this.logPath = that.getLogPath();
       this.sourceFileListing = that.getSourceFileListing();
@@ -345,7 +348,7 @@
   }
 
   /**
-   * Checks if the input attibute should be preserved or not
+   * Checks if the input attribute should be preserved or not
    *
    * @param attribute - Attribute to check
    * @return True if attribute should be preserved, false otherwise
@@ -369,6 +372,21 @@
     preserveStatus.add(fileAttribute);
   }
 
+  /**
+   * Return true if raw.* xattrs should be preserved.
+   * @return true if raw.* xattrs should be preserved.
+   */
+  public boolean shouldPreserveRawXattrs() {
+    return preserveRawXattrs;
+  }
+
+  /**
+   * Indicate that raw.* xattrs should be preserved.
+   */
+  public void preserveRawXattrs() {
+    preserveRawXattrs = true;
+  }
+
   /** Get work path for atomic commit. If null, the work
    * path would be parentOf(targetPath) + "/._WIP_" + nameOf(targetPath)
    *
@@ -565,6 +583,7 @@
         ", sourcePaths=" + sourcePaths +
         ", targetPath=" + targetPath +
         ", targetPathExists=" + targetPathExists +
+        ", preserveRawXattrs=" + preserveRawXattrs +
         '}';
   }
 
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
index ad29942..f9cfc86 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
@@ -37,6 +37,9 @@
 import java.io.*;
 import java.util.Stack;
 
+import static org.apache.hadoop.tools.DistCpConstants
+        .HDFS_RESERVED_RAW_DIRECTORY_NAME;
+
 /**
  * The SimpleCopyListing is responsible for making the exhaustive list of
  * all files/directories under its specified list of input-paths.
@@ -67,6 +70,10 @@
     Path targetPath = options.getTargetPath();
     FileSystem targetFS = targetPath.getFileSystem(getConf());
     boolean targetIsFile = targetFS.isFile(targetPath);
+    targetPath = targetFS.makeQualified(targetPath);
+    final boolean targetIsReservedRaw =
+        Path.getPathWithoutSchemeAndAuthority(targetPath).toString().
+            startsWith(HDFS_RESERVED_RAW_DIRECTORY_NAME);
 
     //If target is a file, then source has to be single file
     if (targetIsFile) {
@@ -93,6 +100,27 @@
       if (!fs.exists(path)) {
         throw new InvalidInputException(path + " doesn't exist");
       }
+      if (Path.getPathWithoutSchemeAndAuthority(path).toString().
+          startsWith(HDFS_RESERVED_RAW_DIRECTORY_NAME)) {
+        if (!targetIsReservedRaw) {
+          final String msg = "The source path '" + path + "' starts with " +
+              HDFS_RESERVED_RAW_DIRECTORY_NAME + " but the target path '" +
+              targetPath + "' does not. Either all or none of the paths must " +
+              "have this prefix.";
+          throw new InvalidInputException(msg);
+        }
+      } else if (targetIsReservedRaw) {
+        final String msg = "The target path '" + targetPath + "' starts with " +
+                HDFS_RESERVED_RAW_DIRECTORY_NAME + " but the source path '" +
+                path + "' does not. Either all or none of the paths must " +
+                "have this prefix.";
+        throw new InvalidInputException(msg);
+      }
+    }
+
+    if (targetIsReservedRaw) {
+      options.preserveRawXattrs();
+      getConf().setBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, true);
     }
 
     /* This is required to allow map tasks to access each of the source
@@ -135,6 +163,9 @@
     try {
       for (Path path: options.getSourcePaths()) {
         FileSystem sourceFS = path.getFileSystem(getConf());
+        final boolean preserveAcls = options.shouldPreserve(FileAttribute.ACL);
+        final boolean preserveXAttrs = options.shouldPreserve(FileAttribute.XATTR);
+        final boolean preserveRawXAttrs = options.shouldPreserveRawXattrs();
         path = makeQualified(path);
 
         FileStatus rootStatus = sourceFS.getFileStatus(path);
@@ -145,8 +176,7 @@
         if (!explore || rootStatus.isDirectory()) {
           CopyListingFileStatus rootCopyListingStatus =
             DistCpUtils.toCopyListingFileStatus(sourceFS, rootStatus,
-              options.shouldPreserve(FileAttribute.ACL), 
-              options.shouldPreserve(FileAttribute.XATTR));
+                preserveAcls, preserveXAttrs, preserveRawXAttrs);
           writeToFileListingRoot(fileListWriter, rootCopyListingStatus,
               sourcePathRoot, options);
         }
@@ -157,9 +187,9 @@
             }
             CopyListingFileStatus sourceCopyListingStatus =
               DistCpUtils.toCopyListingFileStatus(sourceFS, sourceStatus,
-                options.shouldPreserve(FileAttribute.ACL) &&
-                sourceStatus.isDirectory(), options.shouldPreserve(
-                    FileAttribute.XATTR) && sourceStatus.isDirectory());
+                  preserveAcls && sourceStatus.isDirectory(),
+                  preserveXAttrs && sourceStatus.isDirectory(),
+                  preserveRawXAttrs && sourceStatus.isDirectory());
             writeToFileListing(fileListWriter, sourceCopyListingStatus,
                 sourcePathRoot, options);
 
@@ -261,6 +291,9 @@
                                          DistCpOptions options)
                                          throws IOException {
     FileSystem sourceFS = sourcePathRoot.getFileSystem(getConf());
+    final boolean preserveAcls = options.shouldPreserve(FileAttribute.ACL);
+    final boolean preserveXAttrs = options.shouldPreserve(FileAttribute.XATTR);
+    final boolean preserveRawXattrs = options.shouldPreserveRawXattrs();
     Stack<FileStatus> pathStack = new Stack<FileStatus>();
     pathStack.push(sourceStatus);
 
@@ -271,8 +304,9 @@
                     + sourceStatus.getPath() + " for copy.");
         CopyListingFileStatus childCopyListingStatus =
           DistCpUtils.toCopyListingFileStatus(sourceFS, child,
-            options.shouldPreserve(FileAttribute.ACL) && child.isDirectory(), 
-            options.shouldPreserve(FileAttribute.XATTR) && child.isDirectory());
+            preserveAcls && child.isDirectory(),
+            preserveXAttrs && child.isDirectory(),
+            preserveRawXattrs && child.isDirectory());
         writeToFileListing(fileListWriter, childCopyListingStatus,
              sourcePathRoot, options);
         if (isDirectoryAndNotEmpty(sourceFS, child)) {
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 4d16445..197edd9 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -83,7 +83,9 @@
     cleanupTempFiles(jobContext);
 
     String attributes = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
-    if (attributes != null && !attributes.isEmpty()) {
+    final boolean preserveRawXattrs =
+        conf.getBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);
+    if ((attributes != null && !attributes.isEmpty()) || preserveRawXattrs) {
       preserveFileAttributesForDirectories(conf);
     }
 
@@ -167,6 +169,8 @@
     LOG.info("About to preserve attributes: " + attrSymbols);
 
     EnumSet<FileAttribute> attributes = DistCpUtils.unpackAttributes(attrSymbols);
+    final boolean preserveRawXattrs =
+        conf.getBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);
 
     Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
     FileSystem clusterFS = sourceListing.getFileSystem(conf);
@@ -194,7 +198,8 @@
         if (targetRoot.equals(targetFile) && syncOrOverwrite) continue;
 
         FileSystem targetFS = targetFile.getFileSystem(conf);
-        DistCpUtils.preserve(targetFS, targetFile, srcFileStatus,  attributes);
+        DistCpUtils.preserve(targetFS, targetFile, srcFileStatus, attributes,
+            preserveRawXattrs);
 
         taskAttemptContext.progress();
         taskAttemptContext.setStatus("Preserving status on directory entries. [" +
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
index 4ee003d..ab57127 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
@@ -200,6 +200,8 @@
 
     EnumSet<DistCpOptions.FileAttribute> fileAttributes
             = getFileAttributeSettings(context);
+    final boolean preserveRawXattrs = context.getConfiguration().getBoolean(
+        DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);
 
     final String description = "Copying " + sourcePath + " to " + target;
     context.setStatus(description);
@@ -211,10 +213,12 @@
       FileSystem sourceFS;
       try {
         sourceFS = sourcePath.getFileSystem(conf);
+        final boolean preserveXAttrs =
+            fileAttributes.contains(FileAttribute.XATTR);
         sourceCurrStatus = DistCpUtils.toCopyListingFileStatus(sourceFS,
           sourceFS.getFileStatus(sourcePath),
           fileAttributes.contains(FileAttribute.ACL), 
-          fileAttributes.contains(FileAttribute.XATTR));
+          preserveXAttrs, preserveRawXattrs);
       } catch (FileNotFoundException e) {
         throw new IOException(new RetriableFileCopyCommand.CopyReadException(e));
       }
@@ -249,8 +253,8 @@
             action, fileAttributes);
       }
 
-      DistCpUtils.preserve(target.getFileSystem(conf), target,
-                           sourceCurrStatus, fileAttributes);
+      DistCpUtils.preserve(target.getFileSystem(conf), target, sourceCurrStatus,
+          fileAttributes, preserveRawXattrs);
     } catch (IOException exception) {
       handleFailures(exception, sourceFileStatus, target, context);
     }
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index c7b29a1..abd30ee 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.tools.util;
 
+import com.google.common.collect.Maps;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -25,6 +26,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -151,7 +153,7 @@
    * @return - String containing first letters of each attribute to preserve
    */
   public static String packAttributes(EnumSet<FileAttribute> attributes) {
-    StringBuffer buffer = new StringBuffer(5);
+    StringBuffer buffer = new StringBuffer(FileAttribute.values().length);
     int len = 0;
     for (FileAttribute attribute : attributes) {
       buffer.append(attribute.name().charAt(0));
@@ -186,13 +188,15 @@
    * @param targetFS - File system
    * @param path - Path that needs to preserve original file status
    * @param srcFileStatus - Original file status
-   * @param attributes - Attribute set that need to be preserved
+   * @param attributes - Attribute set that needs to be preserved
+   * @param preserveRawXattrs if true, raw.* xattrs should be preserved
    * @throws IOException - Exception if any (particularly relating to group/owner
    *                       change or any transient error)
    */
   public static void preserve(FileSystem targetFS, Path path,
                               CopyListingFileStatus srcFileStatus,
-                              EnumSet<FileAttribute> attributes) throws IOException {
+                              EnumSet<FileAttribute> attributes,
+                              boolean preserveRawXattrs) throws IOException {
 
     FileStatus targetFileStatus = targetFS.getFileStatus(path);
     String group = targetFileStatus.getGroup();
@@ -214,15 +218,20 @@
       !srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
       targetFS.setPermission(path, srcFileStatus.getPermission());
     }
-    
-    if (attributes.contains(FileAttribute.XATTR)) {
+
+    final boolean preserveXAttrs = attributes.contains(FileAttribute.XATTR);
+    if (preserveXAttrs || preserveRawXattrs) {
+      final String rawNS = XAttr.NameSpace.RAW.name().toLowerCase();
       Map<String, byte[]> srcXAttrs = srcFileStatus.getXAttrs();
       Map<String, byte[]> targetXAttrs = getXAttrs(targetFS, path);
-      if (!srcXAttrs.equals(targetXAttrs)) {
+      if (srcXAttrs != null && !srcXAttrs.equals(targetXAttrs)) {
         Iterator<Entry<String, byte[]>> iter = srcXAttrs.entrySet().iterator();
         while (iter.hasNext()) {
           Entry<String, byte[]> entry = iter.next();
-          targetFS.setXAttr(path, entry.getKey(), entry.getValue());
+          final String xattrName = entry.getKey();
+          if (xattrName.startsWith(rawNS) || preserveXAttrs) {
+            targetFS.setXAttr(path, entry.getKey(), entry.getValue());
+          }
         }
       }
     }
@@ -286,11 +295,12 @@
    * @param fileStatus FileStatus of file
    * @param preserveAcls boolean true if preserving ACLs
    * @param preserveXAttrs boolean true if preserving XAttrs
+   * @param preserveRawXAttrs boolean true if preserving raw.* XAttrs
    * @throws IOException if there is an I/O error
    */
   public static CopyListingFileStatus toCopyListingFileStatus(
       FileSystem fileSystem, FileStatus fileStatus, boolean preserveAcls, 
-      boolean preserveXAttrs) throws IOException {
+      boolean preserveXAttrs, boolean preserveRawXAttrs) throws IOException {
     CopyListingFileStatus copyListingFileStatus =
       new CopyListingFileStatus(fileStatus);
     if (preserveAcls) {
@@ -301,9 +311,25 @@
         copyListingFileStatus.setAclEntries(aclEntries);
       }
     }
-    if (preserveXAttrs) {
-      Map<String, byte[]> xAttrs = fileSystem.getXAttrs(fileStatus.getPath());
-      copyListingFileStatus.setXAttrs(xAttrs);
+    if (preserveXAttrs || preserveRawXAttrs) {
+      Map<String, byte[]> srcXAttrs = fileSystem.getXAttrs(fileStatus.getPath());
+      if (preserveXAttrs && preserveRawXAttrs) {
+        copyListingFileStatus.setXAttrs(srcXAttrs);
+      } else {
+        Map<String, byte[]> trgXAttrs = Maps.newHashMap();
+        final String rawNS = XAttr.NameSpace.RAW.name().toLowerCase();
+        for (Map.Entry<String, byte[]> ent : srcXAttrs.entrySet()) {
+          final String xattrName = ent.getKey();
+          if (xattrName.startsWith(rawNS)) {
+            if (preserveRawXAttrs) {
+              trgXAttrs.put(xattrName, ent.getValue());
+            }
+          } else if (preserveXAttrs) {
+            trgXAttrs.put(xattrName, ent.getValue());
+          }
+        }
+        copyListingFileStatus.setXAttrs(trgXAttrs);
+      }
     }
     return copyListingFileStatus;
   }
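
The change above splits xattr preservation into two independent decisions: ordinary
xattrs follow the XATTR preserve attribute, while raw.* xattrs are carried over when
raw preservation is requested (both ends of the copy under /.reserved/raw, per the
tests further down). The following is a minimal standalone sketch of that selection
rule only; the class and the filterXAttrs helper are hypothetical and not part of
DistCpUtils, and it uses plain JDK collections rather than Guava:

    import java.util.HashMap;
    import java.util.Map;

    public class XAttrFilterSketch {
      /** Keep raw.* entries if preserveRaw, all other entries if preserveUser. */
      static Map<String, byte[]> filterXAttrs(Map<String, byte[]> src,
          boolean preserveUser, boolean preserveRaw) {
        Map<String, byte[]> out = new HashMap<>();
        for (Map.Entry<String, byte[]> e : src.entrySet()) {
          boolean isRaw = e.getKey().startsWith("raw.");
          if ((isRaw && preserveRaw) || (!isRaw && preserveUser)) {
            out.put(e.getKey(), e.getValue());
          }
        }
        return out;
      }

      public static void main(String[] args) {
        Map<String, byte[]> src = new HashMap<>();
        src.put("raw.a1", new byte[] {1});
        src.put("user.a1", new byte[] {2});
        // With only raw preservation requested, user.a1 is dropped.
        System.out.println(filterXAttrs(src, false, true).keySet()); // [raw.a1]
      }
    }
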
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
new file mode 100644
index 0000000..5aef51a
--- /dev/null
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithRawXAttrs.java
@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools;
+
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.tools.util.DistCpTestUtils;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+/**
+ * Tests distcp in combination with HDFS raw.* XAttrs.
+ */
+public class TestDistCpWithRawXAttrs {
+
+  private static MiniDFSCluster cluster;
+  private static Configuration conf;
+  private static FileSystem fs;
+
+  private static final String rawName1 = "raw.a1";
+  private static final byte[] rawValue1 = {0x37, 0x38, 0x39};
+  private static final String userName1 = "user.a1";
+  private static final byte[] userValue1 = {0x38, 0x38, 0x38};
+
+  private static final Path dir1 = new Path("/src/dir1");
+  private static final Path subDir1 = new Path(dir1, "subdir1");
+  private static final Path file1 = new Path("/src/file1");
+  private static final String rawRootName = "/.reserved/raw";
+  private static final String rootedDestName = "/dest";
+  private static final String rootedSrcName = "/src";
+  private static final String rawDestName = "/.reserved/raw/dest";
+  private static final String rawSrcName = "/.reserved/raw/src";
+
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true)
+            .build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+  }
+
+  @AfterClass
+  public static void shutdown() {
+    IOUtils.cleanup(null, fs);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  /* Test that XAttrs and raw.* XAttrs are preserved when appropriate. */
+  @Test
+  public void testPreserveRawXAttrs1() throws Exception {
+    final String relSrc = "/./.reserved/../.reserved/raw/../raw/src/../src";
+    final String relDst = "/./.reserved/../.reserved/raw/../raw/dest/../dest";
+    doTestPreserveRawXAttrs(relSrc, relDst, "-px", true, true,
+        DistCpConstants.SUCCESS);
+    doTestPreserveRawXAttrs(rootedSrcName, rootedDestName, "-px",
+        false, true, DistCpConstants.SUCCESS);
+    doTestPreserveRawXAttrs(rootedSrcName, rawDestName, "-px",
+        false, true, DistCpConstants.INVALID_ARGUMENT);
+    doTestPreserveRawXAttrs(rawSrcName, rootedDestName, "-px",
+        false, true, DistCpConstants.INVALID_ARGUMENT);
+    doTestPreserveRawXAttrs(rawSrcName, rawDestName, "-px",
+        true, true, DistCpConstants.SUCCESS);
+    final Path savedWd = fs.getWorkingDirectory();
+    try {
+      fs.setWorkingDirectory(new Path("/.reserved/raw"));
+      doTestPreserveRawXAttrs("../.." + rawSrcName, "../.." + rawDestName,
+              "-px", true, true, DistCpConstants.SUCCESS);
+    } finally {
+      fs.setWorkingDirectory(savedWd);
+    }
+  }
+
+  /* Test that only raw.* XAttrs are preserved when -p is used without x. */
+  @Test
+  public void testPreserveRawXAttrs2() throws Exception {
+    doTestPreserveRawXAttrs(rootedSrcName, rootedDestName, "-p",
+        false, false, DistCpConstants.SUCCESS);
+    doTestPreserveRawXAttrs(rootedSrcName, rawDestName, "-p",
+        false, false, DistCpConstants.INVALID_ARGUMENT);
+    doTestPreserveRawXAttrs(rawSrcName, rootedDestName, "-p",
+        false, false, DistCpConstants.INVALID_ARGUMENT);
+    doTestPreserveRawXAttrs(rawSrcName, rawDestName, "-p",
+        true, false, DistCpConstants.SUCCESS);
+  }
+
+  /* Test that only raw.* XAttrs are preserved when no -p option is given. */
+  @Test
+  public void testPreserveRawXAttrs3() throws Exception {
+    doTestPreserveRawXAttrs(rootedSrcName, rootedDestName, null,
+        false, false, DistCpConstants.SUCCESS);
+    doTestPreserveRawXAttrs(rootedSrcName, rawDestName, null,
+        false, false, DistCpConstants.INVALID_ARGUMENT);
+    doTestPreserveRawXAttrs(rawSrcName, rootedDestName, null,
+        false, false, DistCpConstants.INVALID_ARGUMENT);
+    doTestPreserveRawXAttrs(rawSrcName, rawDestName, null,
+        true, false, DistCpConstants.SUCCESS);
+  }
+
+  private static Path[] pathnames = { new Path("dir1"),
+                                      new Path("dir1/subdir1"),
+                                      new Path("file1") };
+
+  private static void makeFilesAndDirs(FileSystem fs) throws Exception {
+    fs.delete(new Path("/src"), true);
+    fs.delete(new Path("/dest"), true);
+    fs.mkdirs(subDir1);
+    fs.create(file1).close();
+  }
+
+  private void initXAttrs() throws Exception {
+    makeFilesAndDirs(fs);
+    for (Path p : pathnames) {
+      fs.setXAttr(new Path(rawRootName + "/src", p), rawName1, rawValue1);
+      fs.setXAttr(new Path(rawRootName + "/src", p), userName1, userValue1);
+    }
+  }
+
+  private void doTestPreserveRawXAttrs(String src, String dest,
+      String preserveOpts, boolean expectRaw, boolean expectUser,
+      int expectedExitCode) throws Exception {
+    initXAttrs();
+
+    DistCpTestUtils.assertRunDistCp(expectedExitCode, src, dest,
+        preserveOpts, conf);
+
+    if (expectedExitCode == DistCpConstants.SUCCESS) {
+      Map<String, byte[]> xAttrs = Maps.newHashMap();
+      for (Path p : pathnames) {
+        xAttrs.clear();
+        if (expectRaw) {
+          xAttrs.put(rawName1, rawValue1);
+        }
+        if (expectUser) {
+          xAttrs.put(userName1, userValue1);
+        }
+        DistCpTestUtils.assertXAttrs(new Path(dest, p), fs, xAttrs);
+      }
+    }
+  }
+}
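
The test matrix above encodes the path rules: raw.* xattrs survive the copy only when
both the source and the destination are given under /.reserved/raw, and mixing a raw
path with a regular path is rejected with INVALID_ARGUMENT. For reference, a minimal
driver mirroring what DistCpTestUtils.assertRunDistCp does; the paths are illustrative
and a running HDFS with xattrs enabled is assumed:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.util.ToolRunner;

    public class RawDistCpDriverSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Both paths are under /.reserved/raw, so raw.* xattrs travel with the
        // data; -px additionally preserves the user-visible xattrs.
        int rc = ToolRunner.run(conf, new DistCp(conf, null), new String[] {
            "-px", "/.reserved/raw/src", "/.reserved/raw/dest" });
        System.exit(rc);
      }
    }
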
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java
index cc13b8f..6b0e2b22 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithXAttrs.java
@@ -18,13 +18,9 @@
 
 package org.apache.hadoop.tools;
 
-import static org.junit.Assert.*;
-
 import java.io.IOException;
 import java.net.URI;
-import java.util.Iterator;
 import java.util.Map;
-import java.util.Map.Entry;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -37,8 +33,8 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.tools.util.DistCpTestUtils;
 import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.ToolRunner;
 
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -79,6 +75,7 @@
   private static final Path dstFile2 = new Path(dstDir2, "file2");
   private static final Path dstFile3 = new Path(dstDir2, "file3");
   private static final Path dstFile4 = new Path(dstDir2, "file4");
+  private static final String rootedSrcName = "/src";
 
   @BeforeClass
   public static void init() throws Exception {
@@ -125,55 +122,56 @@
 
   @Test
   public void testPreserveXAttrs() throws Exception {
-    assertRunDistCp(DistCpConstants.SUCCESS, "/dstPreserveXAttrs");
+    DistCpTestUtils.assertRunDistCp(DistCpConstants.SUCCESS, rootedSrcName,
+        "/dstPreserveXAttrs", "-px", conf);
 
     // dstDir1
     Map<String, byte[]> xAttrs = Maps.newHashMap();
     xAttrs.put(name1, value1);
     xAttrs.put(name2, value2);
-    assertXAttrs(dstDir1, xAttrs);
+    DistCpTestUtils.assertXAttrs(dstDir1, fs, xAttrs);
     
     // dstSubDir1
     xAttrs.clear();
     xAttrs.put(name1, value1);
     xAttrs.put(name3, new byte[0]);
-    assertXAttrs(dstSubDir1, xAttrs);
+    DistCpTestUtils.assertXAttrs(dstSubDir1, fs, xAttrs);
     
     // dstFile1
     xAttrs.clear();
     xAttrs.put(name1, value1);
     xAttrs.put(name2, value2);
     xAttrs.put(name3, new byte[0]);
-    assertXAttrs(dstFile1, xAttrs);
+    DistCpTestUtils.assertXAttrs(dstFile1, fs, xAttrs);
     
     // dstDir2
     xAttrs.clear();
     xAttrs.put(name2, value2);
-    assertXAttrs(dstDir2, xAttrs);
+    DistCpTestUtils.assertXAttrs(dstDir2, fs, xAttrs);
     
     // dstFile2
     xAttrs.clear();
     xAttrs.put(name1, value1);
     xAttrs.put(name4, new byte[0]);
-    assertXAttrs(dstFile2, xAttrs);
+    DistCpTestUtils.assertXAttrs(dstFile2, fs, xAttrs);
     
     // dstFile3
     xAttrs.clear();
     xAttrs.put(name3, new byte[0]);
     xAttrs.put(name4, new byte[0]);
-    assertXAttrs(dstFile3, xAttrs);
+    DistCpTestUtils.assertXAttrs(dstFile3, fs, xAttrs);
     
     // dstFile4
     xAttrs.clear();
-    assertXAttrs(dstFile4, xAttrs);
+    DistCpTestUtils.assertXAttrs(dstFile4, fs, xAttrs);
   }
 
   @Test
   public void testXAttrsNotEnabled() throws Exception {
     try {
       restart(false);
-      assertRunDistCp(DistCpConstants.XATTRS_NOT_SUPPORTED, 
-          "/dstXAttrsNotEnabled");
+      DistCpTestUtils.assertRunDistCp(DistCpConstants.XATTRS_NOT_SUPPORTED,
+          rootedSrcName, "/dstXAttrsNotEnabled", "-px", conf);
     } finally {
       restart(true);
     }
@@ -181,8 +179,8 @@
 
   @Test
   public void testXAttrsNotImplemented() throws Exception {
-    assertRunDistCp(DistCpConstants.XATTRS_NOT_SUPPORTED,
-        "stubfs://dstXAttrsNotImplemented");
+    DistCpTestUtils.assertRunDistCp(DistCpConstants.XATTRS_NOT_SUPPORTED,
+        rootedSrcName, "stubfs://dstXAttrsNotImplemented", "-px", conf);
   }
 
   /**
@@ -252,45 +250,6 @@
   }
 
   /**
-   * Asserts the XAttrs returned by getXAttrs for a specific path.
-   * 
-   * @param path String path to check
-   * @param xAttrs XAttr[] expected xAttrs
-   * @throws Exception if there is any error
-   */
-  private static void assertXAttrs(Path path, Map<String, byte[]> expectedXAttrs)
-      throws Exception {
-    Map<String, byte[]> xAttrs = fs.getXAttrs(path);
-    assertEquals(expectedXAttrs.size(), xAttrs.size());
-    Iterator<Entry<String, byte[]>> i = expectedXAttrs.entrySet().iterator();
-    while (i.hasNext()) {
-      Entry<String, byte[]> e = i.next();
-      String name = e.getKey();
-      byte[] value = e.getValue();
-      if (value == null) {
-        assertTrue(xAttrs.containsKey(name) && xAttrs.get(name) == null);
-      } else {
-        assertArrayEquals(value, xAttrs.get(name));
-      }
-    }
-  }
-
-  /**
-   * Runs distcp from /src to specified destination, preserving XAttrs. Asserts
-   * expected exit code.
-   * 
-   * @param int exitCode expected exit code
-   * @param dst String distcp destination
-   * @throws Exception if there is any error
-   */
-  private static void assertRunDistCp(int exitCode, String dst)
-      throws Exception {
-    DistCp distCp = new DistCp(conf, null);
-    assertEquals(exitCode,
-        ToolRunner.run(conf, distCp, new String[] { "-px", "/src", dst }));
-  }
-
-  /**
    * Initialize the cluster, wait for it to become active, and get FileSystem.
    * 
    * @param format if true, format the NameNode and DataNodes before starting up
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
index d3da68e..2c42269 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
@@ -357,7 +357,8 @@
     DistCpOptions option = new DistCpOptions(new Path("abc"), new Path("xyz"));
     String val = "DistCpOptions{atomicCommit=false, syncFolder=false, deleteMissing=false, " +
         "ignoreFailures=false, maxMaps=20, sslConfigurationFile='null', copyStrategy='uniformsize', " +
-        "sourceFileListing=abc, sourcePaths=null, targetPath=xyz, targetPathExists=true}";
+        "sourceFileListing=abc, sourcePaths=null, targetPath=xyz, targetPathExists=true, " +
+        "preserveRawXattrs=false}";
     Assert.assertEquals(val, option.toString());
     Assert.assertNotSame(DistCpOptionSwitch.ATOMIC_COMMIT.toString(),
         DistCpOptionSwitch.ATOMIC_COMMIT.name());
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/DistCpTestUtils.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/DistCpTestUtils.java
new file mode 100644
index 0000000..2721638
--- /dev/null
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/DistCpTestUtils.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools.util;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import org.apache.hadoop.tools.DistCp;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * Utility class for DistCp tests.
+ */
+public class DistCpTestUtils {
+
+   /**
+    * Asserts the XAttrs returned by getXAttrs for a specific path match an
+    * expected set of XAttrs.
+    *
+    * @param path Path to check
+    * @param fs FileSystem to use for the path
+    * @param expectedXAttrs Map of expected xAttr names to values
+    * @throws Exception if there is any error
+    */
+  public static void assertXAttrs(Path path, FileSystem fs,
+      Map<String, byte[]> expectedXAttrs)
+      throws Exception {
+    Map<String, byte[]> xAttrs = fs.getXAttrs(path);
+    assertEquals(path.toString(), expectedXAttrs.size(), xAttrs.size());
+    Iterator<Entry<String, byte[]>> i = expectedXAttrs.entrySet().iterator();
+    while (i.hasNext()) {
+      Entry<String, byte[]> e = i.next();
+      String name = e.getKey();
+      byte[] value = e.getValue();
+      if (value == null) {
+        assertTrue(xAttrs.containsKey(name) && xAttrs.get(name) == null);
+      } else {
+        assertArrayEquals(value, xAttrs.get(name));
+      }
+    }
+  }
+
+  /**
+   * Runs distcp from src to dst with the given options and asserts the
+   * expected exit code.
+   *
+   * @param exitCode expected exit code
+   * @param src distcp src path
+   * @param dst distcp destination
+   * @param options distcp command line options
+   * @param conf Configuration to use
+   * @throws Exception if there is any error
+   */
+  public static void assertRunDistCp(int exitCode, String src, String dst,
+      String options, Configuration conf)
+      throws Exception {
+    DistCp distCp = new DistCp(conf, null);
+    String[] optsArr = options == null ?
+        new String[] { src, dst } :
+        new String[] { options, src, dst };
+    assertEquals(exitCode,
+        ToolRunner.run(conf, distCp, optsArr));
+  }
+}
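
A hedged usage sketch for the new helper: assertXAttrs fetches the xattrs of a path and
compares them, key by key, against the expected map. The class name, path, and xattr
value below are illustrative, and a FileSystem with xattr support (such as the
MiniDFSCluster used in the tests above) is assumed:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.tools.util.DistCpTestUtils;

    public class AssertXAttrsUsageSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Map<String, byte[]> expected = new HashMap<>();
        expected.put("user.a1", new byte[] { 0x38, 0x38, 0x38 });
        // Fails unless /dest/file1 carries exactly this one xattr.
        DistCpTestUtils.assertXAttrs(new Path("/dest/file1"), fs, expected);
      }
    }
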
diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
index 4825e15..c4b64b9 100644
--- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java
@@ -114,14 +114,14 @@
       fs.setPermission(path, noPerm);
       fs.setOwner(path, "nobody", "nobody");
 
-      DistCpUtils.preserve(fs, path, srcStatus, attributes);
+      DistCpUtils.preserve(fs, path, srcStatus, attributes, false);
       FileStatus target = fs.getFileStatus(path);
       Assert.assertEquals(target.getPermission(), noPerm);
       Assert.assertEquals(target.getOwner(), "nobody");
       Assert.assertEquals(target.getGroup(), "nobody");
 
       attributes.add(FileAttribute.PERMISSION);
-      DistCpUtils.preserve(fs, path, srcStatus, attributes);
+      DistCpUtils.preserve(fs, path, srcStatus, attributes, false);
       target = fs.getFileStatus(path);
       Assert.assertEquals(target.getPermission(), srcStatus.getPermission());
       Assert.assertEquals(target.getOwner(), "nobody");
@@ -129,7 +129,7 @@
 
       attributes.add(FileAttribute.GROUP);
       attributes.add(FileAttribute.USER);
-      DistCpUtils.preserve(fs, path, srcStatus, attributes);
+      DistCpUtils.preserve(fs, path, srcStatus, attributes, false);
       target = fs.getFileStatus(path);
       Assert.assertEquals(target.getPermission(), srcStatus.getPermission());
       Assert.assertEquals(target.getOwner(), srcStatus.getOwner());