HDFS-7011. Implement basic utilities for libhdfs3 (cmccabe)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake
index 322ecdb..eb74bd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake
@@ -35,8 +35,13 @@
 ELSE(ENABLE_DEBUG STREQUAL ON)
     SET(CMAKE_BUILD_TYPE RelWithDebInfo CACHE 
         STRING "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel." FORCE)
+    SET(CMAKE_CXX_FLAGS_DEBUG "-g -O2" CACHE STRING "C++ compiler flags for debug" FORCE)
+    SET(CMAKE_C_FLAGS_DEBUG "-g -O2" CACHE STRING "C compiler flags for debug" FORCE)
 ENDIF(ENABLE_DEBUG STREQUAL ON)
 
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-strict-aliasing")
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-strict-aliasing")
+
 IF(ENABLE_COVERAGE STREQUAL ON)
     INCLUDE(CodeCoverage)
 ENDIF(ENABLE_COVERAGE STREQUAL ON)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Atomic.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Atomic.h
new file mode 100644
index 0000000..d5fb518
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Atomic.h
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_ATOMIC_H_
+#define _HDFS_LIBHDFS3_COMMON_ATOMIC_H_
+
+#include "platform.h"
+
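+/*
+ * Portability shim: "atomic" resolves to boost::atomic when the build
+ * requires Boost (NEED_BOOST) and to std::atomic otherwise. Illustrative
+ * use (a sketch, not part of this patch):
+ *   hdfs::internal::atomic<int> refCount(0);
+ *   refCount.fetch_add(1);
+ */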
+#ifdef NEED_BOOST
+
+#include <boost/atomic.hpp>
+
+namespace hdfs {
+namespace internal {
+
+using boost::atomic;
+
+}
+}
+
+#else
+
+#include <atomic>
+
+namespace hdfs {
+namespace internal {
+
+using std::atomic;
+
+}
+}
+#endif
+
+#endif /* _HDFS_LIBHDFS3_COMMON_ATOMIC_H_ */
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/BigEndian.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/BigEndian.h
new file mode 100644
index 0000000..ab508ce
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/BigEndian.h
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_
+#define _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_
+
+#include <arpa/inet.h>
+#include <stdint.h>
+#include <string.h>
+
+namespace hdfs {
+namespace internal {
+
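+/*
+ * Helpers for reading and writing 16- and 32-bit integers in network
+ * (big-endian) byte order. Illustrative use (not part of this patch):
+ *   char buf[sizeof(int32_t)];
+ *   WriteBigEndian32ToArray(1024, buf);
+ *   int32_t v = ReadBigEndian32FromArray(buf);  // v == 1024
+ */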
+/* Use memcpy rather than dereferencing a cast pointer so that reads from
+ * unaligned buffers are well defined, mirroring the write helpers below. */
+static inline int16_t ReadBigEndian16FromArray(const char * buffer) {
+    int16_t retval;
+    memcpy(&retval, buffer, sizeof(int16_t));
+    return ntohs(retval);
+}
+
+static inline int32_t ReadBigEndian32FromArray(const char * buffer) {
+    int32_t retval;
+    memcpy(&retval, buffer, sizeof(int32_t));
+    return ntohl(retval);
+}
+
+static inline char * WriteBigEndian16ToArray(int16_t value, char * buffer) {
+    int16_t bigValue = htons(value);
+    memcpy(buffer, reinterpret_cast<const char *>(&bigValue), sizeof(int16_t));
+    return buffer + sizeof(int16_t);
+}
+
+static inline char * WriteBigEndian32ToArray(int32_t value, char * buffer) {
+    int32_t bigValue = htonl(value);
+    memcpy(buffer, reinterpret_cast<const char *>(&bigValue), sizeof(int32_t));
+    return buffer + sizeof(int32_t);
+}
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/CFileWrapper.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/CFileWrapper.cc
new file mode 100644
index 0000000..15eeffa
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/CFileWrapper.cc
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FileWrapper.h"
+
+#include <cerrno>
+#include <cstdlib>
+#include <limits>
+#include <string>
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+
+namespace hdfs {
+namespace internal {
+
+CFileWrapper::CFileWrapper() :
+    file(NULL) {
+}
+
+CFileWrapper::~CFileWrapper() {
+    close();
+}
+
+bool CFileWrapper::open(int fd, bool delegate) {
+    assert(false && "not implemented");
+    abort();
+    return false;
+}
+
+bool CFileWrapper::open(const std::string &path) {
+    this->path = path;
+    file = fopen(path.c_str(), "rb");
+    return NULL != file;
+}
+
+void CFileWrapper::close() {
+    if (NULL != file) {
+        fclose(file);
+        file = NULL;
+    }
+}
+
+const char *CFileWrapper::read(std::vector<char> &buffer, int32_t size) {
+    buffer.resize(size);
+    copy(&buffer[0], size);
+    return &buffer[0];
+}
+
+void CFileWrapper::copy(char *buffer, int32_t size) {
+    int32_t todo = size, done;
+
+    while (todo > 0) {
+        done = fread(buffer + (size - todo), sizeof(char), todo, file);
+
+        /* fread never returns a negative count; it returns 0 on both error
+         * and end of file, so use ferror to tell the two apart. */
+        if (0 == done) {
+            if (ferror(file)) {
+                THROW(HdfsIOException, "Cannot read file \"%s\", %s.",
+                      path.c_str(), GetSystemErrorInfo(errno));
+            } else {
+                THROW(HdfsIOException, "Cannot read file \"%s\", End of file.",
+                      path.c_str());
+            }
+        }
+
+        todo -= done;
+    }
+}
+
+void CFileWrapper::seek(int64_t offset) {
+    assert(offset > 0);
+    int64_t todo = offset, batch;
+    bool seek_set = true;
+
+    /*
+     * fseek takes a long offset, which may be narrower than int64_t on
+     * 32-bit platforms, so seek in batches: the first batch is absolute
+     * (SEEK_SET) and any remainder is applied relatively (SEEK_CUR).
+     */
+    while (todo > 0) {
+        batch = todo < std::numeric_limits<long>::max() ?
+                todo : std::numeric_limits<long>::max();
+        int rc = fseek(file, static_cast<long>(batch),
+                       seek_set ? SEEK_SET : SEEK_CUR);
+        seek_set = false;
+
+        if (rc != 0) {
+            THROW(HdfsIOException, "Cannot seek in file: %s, %s", path.c_str(),
+                  GetSystemErrorInfo(errno));
+        }
+
+        todo -= batch;
+    }
+}
+
+}
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Checksum.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Checksum.h
new file mode 100644
index 0000000..d377e6d6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Checksum.h
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_CHECKSUM_H_
+#define _HDFS_LIBHDFS3_COMMON_CHECKSUM_H_
+
+#include <stdint.h>
+
+#define CHECKSUM_TYPE_SIZE 1
+#define CHECKSUM_BYTES_PER_CHECKSUM_SIZE 4
+#define CHECKSUM_TYPE_CRC32C 2
+
+namespace hdfs {
+namespace internal {
+
+/**
+ * An abstract base CRC class.
+ */
+class Checksum {
+public:
+    /**
+     * @return Returns the current checksum value.
+     */
+    virtual uint32_t getValue() = 0;
+
+    /**
+     * Resets the checksum to its initial value.
+     */
+    virtual void reset() = 0;
+
+    /**
+     * Updates the current checksum with the specified array of bytes.
+     * @param b The buffer of data.
+     * @param len The buffer length.
+     */
+    virtual void update(const void * b, int len) = 0;
+
+    /**
+     * Destroy the instance.
+     */
+    virtual ~Checksum() {
+    }
+};
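+
+/*
+ * Illustrative use with a concrete implementation such as HWCrc32c, which
+ * this patch also adds (a sketch, not part of the patch itself):
+ *   HWCrc32c crc;
+ *   crc.update(buffer, length);
+ *   uint32_t sum = crc.getValue();
+ */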
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_CHECKSUM_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/DateTime.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/DateTime.h
new file mode 100644
index 0000000..d848552
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/DateTime.h
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_DATETIME_H_
+#define _HDFS_LIBHDFS3_COMMON_DATETIME_H_
+
+#include "platform.h"
+
+#include <ctime>
+#include <cassert>
+
+#ifdef NEED_BOOST
+
+#include <boost/chrono.hpp>
+
+namespace hdfs {
+namespace internal {
+
+using namespace boost::chrono;
+
+}
+}
+
+#else
+
+#include <chrono>
+
+namespace hdfs {
+namespace internal {
+
+using namespace std::chrono;
+
+}
+}
+#endif
+
+namespace hdfs {
+namespace internal {
+
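+/*
+ * ToMilliSeconds computes the elapsed time between two time points of the
+ * same clock. Illustrative use (not part of this patch):
+ *   steady_clock::time_point start = steady_clock::now();
+ *   // ... do some work ...
+ *   int64_t elapsed = ToMilliSeconds(start, steady_clock::now());
+ */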
+template<typename TimeStamp>
+static int64_t ToMilliSeconds(TimeStamp const & s, TimeStamp const & e) {
+    assert(e >= s);
+    return duration_cast<milliseconds>(e - s).count();
+}
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_DATETIME_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Exception.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Exception.cc
new file mode 100644
index 0000000..c693ca1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Exception.cc
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Exception.h"
+
+#include <sstream>
+
+namespace hdfs {
+
+const char *HdfsIOException::ReflexName = "java.io.IOException";
+
+const char *AlreadyBeingCreatedException::ReflexName =
+    "org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException";
+
+const char *AccessControlException::ReflexName =
+    "org.apache.hadoop.security.AccessControlException";
+
+const char *FileAlreadyExistsException::ReflexName =
+    "org.apache.hadoop.fs.FileAlreadyExistsException";
+
+const char *DSQuotaExceededException::ReflexName =
+    "org.apache.hadoop.hdfs.protocol.DSQuotaExceededException";
+
+const char *NSQuotaExceededException::ReflexName =
+    "org.apache.hadoop.hdfs.protocol.NSQuotaExceededException";
+
+const char *ParentNotDirectoryException::ReflexName =
+    "org.apache.hadoop.fs.ParentNotDirectoryException";
+
+const char *SafeModeException::ReflexName =
+    "org.apache.hadoop.hdfs.server.namenode.SafeModeException";
+
+const char *NotReplicatedYetException::ReflexName =
+    "org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException";
+
+const char *FileNotFoundException::ReflexName = "java.io.FileNotFoundException";
+
+const char *UnresolvedLinkException::ReflexName =
+    "org.apache.hadoop.fs.UnresolvedLinkException";
+
+const char *UnsupportedOperationException::ReflexName =
+    "java.lang.UnsupportedOperationException";
+
+const char *ReplicaNotFoundException::ReflexName =
+    "org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException";
+
+const char *NameNodeStandbyException::ReflexName =
+    "org.apache.hadoop.ipc.StandbyException";
+
+const char *HdfsInvalidBlockToken::ReflexName =
+    "org.apache.hadoop.security.token.SecretManager$InvalidToken";
+
+const char *SaslException::ReflexName = "javax.security.sasl.SaslException";
+
+const char *RpcNoSuchMethodException::ReflexName = "org.apache.hadoop.ipc.RpcNoSuchMethodException";
+
+const char *InvalidParameter::ReflexName = "java.lang.IllegalArgumentException";
+
+HdfsException::HdfsException(const std::string &arg, const char *file,
+                             int line, const char *stack) :
+    std::runtime_error(arg) {
+    std::ostringstream ss;
+    ss << file << ": " << line << ": " << arg << std::endl << stack;
+    detail = ss.str();
+}
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Exception.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Exception.h
new file mode 100644
index 0000000..71af81f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Exception.h
@@ -0,0 +1,504 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_EXCEPTION_H_
+#define _HDFS_LIBHDFS3_COMMON_EXCEPTION_H_
+
+#include <stdexcept>
+#include <string>
+
+namespace hdfs {
+
+class HdfsException: public std::runtime_error {
+public:
+    HdfsException(const std::string &arg, const char *file, int line,
+                  const char *stack);
+
+    ~HdfsException() throw () {
+    }
+
+    virtual const char *msg() const {
+        return detail.c_str();
+    }
+
+protected:
+    std::string detail;
+};
+
+class HdfsIOException: public HdfsException {
+public:
+    HdfsIOException(const std::string &arg, const char *file, int line,
+                    const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsIOException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class HdfsNetworkException: public HdfsIOException {
+public:
+    HdfsNetworkException(const std::string &arg, const char *file, int line,
+                         const char *stack) :
+        HdfsIOException(arg, file, line, stack) {
+    }
+
+    ~HdfsNetworkException() throw () {
+    }
+};
+
+class HdfsNetworkConnectException: public HdfsNetworkException {
+public:
+    HdfsNetworkConnectException(const std::string &arg, const char *file, int line,
+                                const char *stack) :
+        HdfsNetworkException(arg, file, line, stack) {
+    }
+
+    ~HdfsNetworkConnectException() throw () {
+    }
+};
+
+class AccessControlException: public HdfsException {
+public:
+    AccessControlException(const std::string &arg, const char *file, int line,
+                           const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~AccessControlException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class AlreadyBeingCreatedException: public HdfsException {
+public:
+    AlreadyBeingCreatedException(const std::string &arg, const char *file,
+                                 int line, const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~AlreadyBeingCreatedException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class ChecksumException: public HdfsException {
+public:
+    ChecksumException(const std::string &arg, const char *file, int line,
+                      const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~ChecksumException() throw () {
+    }
+};
+
+class DSQuotaExceededException: public HdfsException {
+public:
+    DSQuotaExceededException(const std::string &arg, const char *file,
+                             int line, const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~DSQuotaExceededException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class FileAlreadyExistsException: public HdfsException {
+public:
+    FileAlreadyExistsException(const std::string &arg, const char *file,
+                               int line, const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~FileAlreadyExistsException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class FileNotFoundException: public HdfsException {
+public:
+    FileNotFoundException(const std::string &arg, const char *file, int line,
+                          const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~FileNotFoundException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class HdfsBadBoolFormat: public HdfsException {
+public:
+    HdfsBadBoolFormat(const std::string &arg, const char *file, int line,
+                      const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsBadBoolFormat() throw () {
+    }
+};
+
+class HdfsBadConfigFormat: public HdfsException {
+public:
+    HdfsBadConfigFormat(const std::string &arg, const char *file, int line,
+                        const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsBadConfigFormat() throw () {
+    }
+};
+
+class HdfsBadNumFormat: public HdfsException {
+public:
+    HdfsBadNumFormat(const std::string &arg, const char *file, int line,
+                     const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsBadNumFormat() throw () {
+    }
+};
+
+class HdfsCanceled: public HdfsException {
+public:
+    HdfsCanceled(const std::string &arg, const char *file, int line,
+                 const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsCanceled() throw () {
+    }
+};
+
+class HdfsFileSystemClosed: public HdfsException {
+public:
+    HdfsFileSystemClosed(const std::string &arg, const char *file, int line,
+                         const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsFileSystemClosed() throw () {
+    }
+};
+
+class HdfsConfigInvalid: public HdfsException {
+public:
+    HdfsConfigInvalid(const std::string &arg, const char *file, int line,
+                      const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsConfigInvalid() throw () {
+    }
+};
+
+class HdfsConfigNotFound: public HdfsException {
+public:
+    HdfsConfigNotFound(const std::string &arg, const char *file, int line,
+                       const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsConfigNotFound() throw () {
+    }
+};
+
+class HdfsEndOfStream: public HdfsIOException {
+public:
+    HdfsEndOfStream(const std::string &arg, const char *file, int line,
+                    const char *stack) :
+        HdfsIOException(arg, file, line, stack) {
+    }
+
+    ~HdfsEndOfStream() throw () {
+    }
+};
+
+class HdfsInvalidBlockToken: public HdfsException {
+public:
+    HdfsInvalidBlockToken(const std::string &arg, const char *file, int line,
+                          const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsInvalidBlockToken() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+/**
+ * This wraps HdfsNetworkConnectException and HdfsTimeoutException.
+ * This exception is caught, and an attempt is made to recover in the HA case.
+ */
+class HdfsFailoverException: public HdfsException {
+public:
+    HdfsFailoverException(const std::string &arg, const char *file, int line,
+                          const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsFailoverException() throw () {
+    }
+};
+
+/**
+ * Fatal error during the RPC call. It may wrap other exceptions.
+ */
+class HdfsRpcException: public HdfsIOException {
+public:
+    HdfsRpcException(const std::string &arg, const char *file, int line,
+                     const char *stack) :
+        HdfsIOException(arg, file, line, stack) {
+    }
+
+    ~HdfsRpcException() throw () {
+    }
+};
+
+/**
+ * The server threw an error during the RPC call.
+ * It should be used internally and parsed for details.
+ */
+class HdfsRpcServerException: public HdfsIOException {
+public:
+    HdfsRpcServerException(const std::string &arg, const char *file, int line,
+                           const char *stack) :
+        HdfsIOException(arg, file, line, stack) {
+    }
+
+    ~HdfsRpcServerException() throw () {
+    }
+
+    const std::string &getErrClass() const {
+        return errClass;
+    }
+
+    void setErrClass(const std::string &errClass) {
+        this->errClass = errClass;
+    }
+
+    const std::string &getErrMsg() const {
+        return errMsg;
+    }
+
+    void setErrMsg(const std::string &errMsg) {
+        this->errMsg = errMsg;
+    }
+
+private:
+    std::string errClass;
+    std::string errMsg;
+};
+
+class HdfsTimeoutException: public HdfsException {
+public:
+    HdfsTimeoutException(const std::string &arg, const char *file, int line,
+                         const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsTimeoutException() throw () {
+    }
+};
+
+class InvalidParameter: public HdfsException {
+public:
+    InvalidParameter(const std::string &arg, const char *file, int line,
+                     const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~InvalidParameter() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class InvalidPath: public HdfsException {
+public:
+    InvalidPath(const std::string &arg, const char *file, int line,
+                const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~InvalidPath() throw () {
+    }
+};
+
+class NotReplicatedYetException: public HdfsException {
+public:
+    NotReplicatedYetException(const std::string &arg, const char *file,
+                              int line, const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~NotReplicatedYetException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class NSQuotaExceededException: public HdfsException {
+public:
+    NSQuotaExceededException(const std::string &arg, const char *file,
+                             int line, const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~NSQuotaExceededException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class ParentNotDirectoryException: public HdfsException {
+public:
+    ParentNotDirectoryException(const std::string &arg, const char *file,
+                                int line, const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~ParentNotDirectoryException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class ReplicaNotFoundException: public HdfsException {
+public:
+    ReplicaNotFoundException(const std::string &arg, const char *file,
+                             int line, const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~ReplicaNotFoundException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class SafeModeException: public HdfsException {
+public:
+    SafeModeException(const std::string &arg, const char *file, int line,
+                      const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~SafeModeException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class UnresolvedLinkException: public HdfsException {
+public:
+    UnresolvedLinkException(const std::string &arg, const char *file,
+                            int line, const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~UnresolvedLinkException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class UnsupportedOperationException: public HdfsException {
+public:
+    UnsupportedOperationException(const std::string &arg, const char *file,
+                                  int line, const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~UnsupportedOperationException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class SaslException: public HdfsException {
+public:
+    SaslException(const std::string &arg, const char *file, int line,
+                  const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~SaslException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class NameNodeStandbyException: public HdfsException {
+public:
+    NameNodeStandbyException(const std::string &arg, const char *file,
+                             int line, const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~NameNodeStandbyException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+class RpcNoSuchMethodException: public HdfsException {
+public:
+    RpcNoSuchMethodException(const std::string &arg, const char *file,
+                             int line, const char *stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~RpcNoSuchMethodException() throw () {
+    }
+
+public:
+    static const char *ReflexName;
+};
+
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_EXCEPTION_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.cc
new file mode 100644
index 0000000..3279feb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.cc
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "platform.h"
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Thread.h"
+
+#include <cassert>
+#include <cstring>
+#include <sstream>
+
+namespace hdfs {
+
+function<bool(void)> CheckOperationCanceledCallback;
+
+namespace internal {
+
+bool CheckOperationCanceled() {
+    if (CheckOperationCanceledCallback && CheckOperationCanceledCallback()) {
+        THROW(HdfsCanceled, "Operation has been canceled by the user.");
+    }
+
+    return false;
+}
+
+const char *GetSystemErrorInfo(int eno) {
+    static THREAD_LOCAL char message[64];
+    char buffer[64], *pbuffer;
+    pbuffer = buffer;
+#ifdef STRERROR_R_RETURN_INT
+    strerror_r(eno, buffer, sizeof(buffer));
+#else
+    pbuffer = strerror_r(eno, buffer, sizeof(buffer));
+#endif
+    snprintf(message, sizeof(message), "(errno: %d) %s", eno, pbuffer);
+    return message;
+}
+
+static THREAD_LOCAL std::string *MessageBuffer = NULL;
+
+static void InitMessageBuffer() {
+    if (!MessageBuffer) {
+      MessageBuffer = new std::string;
+    }
+}
+
+static void GetExceptionDetailInternal(const hdfs::HdfsException &e,
+                                       std::stringstream &ss, bool topLevel);
+
+static void GetExceptionDetailInternal(const std::exception &e,
+                                       std::stringstream &ss, bool topLevel) {
+    try {
+        if (!topLevel) {
+            ss << "Caused by\n";
+        }
+
+        ss << e.what();
+    } catch (const std::bad_alloc &e) {
+        return;
+    }
+
+    try {
+        hdfs::rethrow_if_nested(e);
+    } catch (const hdfs::HdfsException &nested) {
+        GetExceptionDetailInternal(nested, ss, false);
+    } catch (const std::exception &nested) {
+        GetExceptionDetailInternal(nested, ss, false);
+    }
+}
+
+static void GetExceptionDetailInternal(const hdfs::HdfsException &e,
+                                       std::stringstream &ss, bool topLevel) {
+    try {
+        if (!topLevel) {
+            ss << "Caused by\n";
+        }
+
+        ss << e.msg();
+    } catch (const std::bad_alloc &e) {
+        return;
+    }
+
+    try {
+        hdfs::rethrow_if_nested(e);
+    } catch (const hdfs::HdfsException &nested) {
+        GetExceptionDetailInternal(nested, ss, false);
+    } catch (const std::exception &nested) {
+        GetExceptionDetailInternal(nested, ss, false);
+    }
+}
+
+const char *GetExceptionDetail(const hdfs::HdfsException &e) {
+    std::stringstream ss;
+    GetExceptionDetailInternal(e, ss, true);
+
+    try {
+        InitMessageBuffer();
+        *MessageBuffer = ss.str();
+    } catch (const std::bad_alloc &e) {
+        return "Out of memory";
+    }
+
+    return MessageBuffer->c_str();
+}
+
+const char *GetExceptionDetail(const exception_ptr e) {
+    std::stringstream ss;
+
+    try {
+        InitMessageBuffer();
+        hdfs::rethrow_exception(e);
+    } catch (const hdfs::HdfsException &nested) {
+        GetExceptionDetailInternal(nested, ss, true);
+    } catch (const std::exception &nested) {
+        GetExceptionDetailInternal(nested, ss, true);
+    }
+
+    try {
+        *MessageBuffer = ss.str();
+    } catch (const std::bad_alloc &e) {
+        return "Out of memory";
+    }
+
+    return MessageBuffer->c_str();
+}
+
+static void GetExceptionMessage(const std::exception &e,
+                                std::stringstream &ss, int recursive) {
+    try {
+        for (int i = 0; i < recursive; ++i) {
+            ss << '\t';
+        }
+
+        if (recursive > 0) {
+            ss << "Caused by: ";
+        }
+
+        ss << e.what();
+    } catch (const std::bad_alloc &e) {
+        return;
+    }
+
+    try {
+        hdfs::rethrow_if_nested(e);
+    } catch (const std::exception &nested) {
+        GetExceptionMessage(nested, ss, recursive + 1);
+    }
+}
+
+const char *GetExceptionMessage(const exception_ptr e, std::string &buffer) {
+    std::stringstream ss;
+
+    try {
+        hdfs::rethrow_exception(e);
+    } catch (const std::bad_alloc &e) {
+        return "Out of memory";
+    } catch (const std::exception &e) {
+        GetExceptionMessage(e, ss, 0);
+    }
+
+    try {
+        buffer = ss.str();
+    } catch (const std::bad_alloc &e) {
+        return "Out of memory";
+    }
+
+    return buffer.c_str();
+}
+
+}
+}
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.h
new file mode 100644
index 0000000..1cd6a02
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.h
@@ -0,0 +1,268 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_EXCEPTION_EXCEPTIONINTERNAL_H_
+#define _HDFS_LIBHDFS3_EXCEPTION_EXCEPTIONINTERNAL_H_
+
+#include "platform.h"
+
+#include <cassert>
+#include <cstdarg>
+#include <cstdio>
+#include <cstring>
+#include <unistd.h>
+#include <string>
+#include <sstream>
+
+#include "Function.h"
+#include "StackPrinter.h"
+
+#define STACK_DEPTH 64
+
+#define PATH_SEPARATOR '/'
+inline static const char *SkipPathPrefix(const char *path) {
+    int i, len = strlen(path);
+
+    for (i = len - 1; i > 0; --i) {
+        if (path[i] == PATH_SEPARATOR) {
+            break;
+        }
+    }
+
+    /* If the path contains no separator, return it unchanged. */
+    if (i < 0 || path[i] != PATH_SEPARATOR) {
+        return path;
+    }
+
+    return path + i + 1;
+}
+
+#ifdef NEED_BOOST
+#include <boost/exception/all.hpp>
+
+namespace hdfs {
+using boost::exception_ptr;
+using boost::rethrow_exception;
+using boost::current_exception;
+
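+/*
+ * Pre-C++11 toolchains lack std::nested_exception, so this block emulates
+ * it on top of boost::exception_ptr: throw_with_nested throws the new
+ * exception wrapped together with the currently handled one, and
+ * rethrow_if_nested rethrows that captured inner exception.
+ */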
+class nested_exception : virtual public boost::exception {
+public:
+    nested_exception() : p(boost::current_exception()) {
+    }
+
+    nested_exception(const nested_exception &other) : p(other.p) {
+    }
+
+    nested_exception &operator = (const nested_exception &other) {
+        this->p = other.p;
+        return *this;
+    }
+
+    virtual ~nested_exception() throw() {}
+
+    void rethrow_nested() const {
+        boost::rethrow_exception(p);
+    }
+
+    boost::exception_ptr nested_ptr() const {
+        return p;
+    }
+protected:
+    boost::exception_ptr p;
+};
+
+template<typename BaseType>
+struct ExceptionWrapper : public BaseType, public nested_exception {
+    explicit ExceptionWrapper(BaseType const &e) : BaseType(static_cast < BaseType const &>(e)) {}
+    ~ExceptionWrapper() throw() {}
+};
+
+template<typename T>
+ATTRIBUTE_NORETURN
+static inline void throw_with_nested(T const &e) {
+    if (dynamic_cast<const nested_exception *>(&e)) {
+        std::terminate();
+    }
+
+    boost::throw_exception(ExceptionWrapper<T>(static_cast < T const &>(e)));
+}
+
+template<typename T>
+static inline void rethrow_if_nested(T const &e) {
+    const nested_exception *nested = dynamic_cast<const nested_exception *>(&e);
+
+    if (nested) {
+        nested->rethrow_nested();
+    }
+}
+
+template<typename T>
+static inline void rethrow_if_nested(const nested_exception &e) {
+    e.rethrow_nested();
+}
+
+namespace internal {
+
+
+template<typename THROWABLE>
+ATTRIBUTE_NORETURN ATTRIBUTE_NOINLINE
+void ThrowException(bool nested, const char *f, int l,
+                    const char *exceptionName, const char *fmt, ...) __attribute__((format(printf, 5, 6))) ;
+
+template<typename THROWABLE>
+ATTRIBUTE_NORETURN ATTRIBUTE_NOINLINE
+void ThrowException(bool nested, const char *f, int l,
+                    const char *exceptionName, const char *fmt, ...) {
+    va_list ap;
+    va_start(ap, fmt);
+    std::string buffer;
+    buffer = exceptionName;
+    buffer.append(": ");
+    int size = vsnprintf(NULL, 0, fmt, ap);
+    va_end(ap);
+    int offset = buffer.size();
+    buffer.resize(offset + size + 1);
+    va_start(ap, fmt);
+    vsnprintf(&buffer[offset], size + 1, fmt, ap);
+    va_end(ap);
+
+    if (!nested) {
+        boost::throw_exception(
+            THROWABLE(buffer.c_str(), SkipPathPrefix(f), l,
+                      hdfs::internal::PrintStack(1, STACK_DEPTH).c_str()));
+    } else {
+        hdfs::throw_with_nested(
+            THROWABLE(buffer.c_str(), SkipPathPrefix(f), l,
+                      hdfs::internal::PrintStack(1, STACK_DEPTH).c_str()));
+    }
+
+    throw std::logic_error("should not reach here.");
+}
+}
+
+}
+
+#else
+
+#include <exception>
+#include <stdexcept>
+
+namespace hdfs {
+
+using std::rethrow_exception;
+using std::current_exception;
+using std::make_exception_ptr;
+using std::throw_with_nested;
+using std::rethrow_if_nested;
+using std::exception_ptr;
+
+namespace internal {
+
+template<typename THROWABLE>
+ATTRIBUTE_NORETURN ATTRIBUTE_NOINLINE
+void ThrowException(bool nested, const char *f, int l,
+                    const char *exceptionName, const char *fmt, ...) __attribute__((format(printf, 5, 6)));
+
+template<typename THROWABLE>
+ATTRIBUTE_NORETURN ATTRIBUTE_NOINLINE
+void ThrowException(bool nested, const char *f, int l,
+                    const char *exceptionName, const char *fmt, ...) {
+    va_list ap;
+    va_start(ap, fmt);
+    std::string buffer;
+    buffer = exceptionName;
+    buffer.append(": ");
+    int size = vsnprintf(NULL, 0, fmt, ap);
+    va_end(ap);
+    int offset = buffer.size();
+    buffer.resize(offset + size + 1);
+    va_start(ap, fmt);
+    vsnprintf(&buffer[offset], size + 1, fmt, ap);
+    va_end(ap);
+
+    if (!nested) {
+        throw THROWABLE(buffer.c_str(), SkipPathPrefix(f), l,
+                        hdfs::internal::PrintStack(1, STACK_DEPTH).c_str());
+    } else {
+        std::throw_with_nested(
+            THROWABLE(buffer.c_str(), SkipPathPrefix(f), l,
+                      hdfs::internal::PrintStack(1, STACK_DEPTH).c_str()));
+    }
+
+    throw std::logic_error("should not reach here.");
+}
+
+}
+}
+
+#endif
+
+namespace hdfs {
+
+/**
+ * A user-defined callback used to check whether a slow operation has been
+ * canceled by the user. If this function returns true, HdfsCanceled is thrown.
+ */
+extern function<bool(void)> CheckOperationCanceledCallback;
+
+class HdfsException;
+
+}
+
+namespace hdfs {
+namespace internal {
+
+/**
+ * Check whether a slow operation has been canceled by the user.
+ * @return false if the operation has not been canceled.
+ * @throw HdfsCanceled if the operation has been canceled.
+ */
+bool CheckOperationCanceled();
+
+/**
+ * Get an exception's detail message.
+ * If the exception contains nested exceptions, recursively gather their
+ * detail messages as well.
+ * @param e The exception whose detail message is to be returned.
+ * @return The exception's detail message.
+ */
+const char *GetExceptionDetail(const hdfs::HdfsException &e);
+
+/**
+ * Get an exception's detail message.
+ * If the exception contains nested exceptions, recursively gather their
+ * detail messages as well.
+ * @param e The exception whose detail message is to be returned.
+ * @return The exception's detail message.
+ */
+const char *GetExceptionDetail(const exception_ptr e);
+
+const char *GetExceptionMessage(const exception_ptr e, std::string &buffer);
+
+/**
+ * Get the error message for a given system error number.
+ * @param eno The system error number.
+ * @return The error message.
+ * @throw nothrow
+ */
+const char *GetSystemErrorInfo(int eno);
+
+}
+}
+
+#define THROW(throwable, fmt, ...) \
+    hdfs::internal::ThrowException<throwable>(false, __FILE__, __LINE__, #throwable, fmt, ##__VA_ARGS__)
+
+#define NESTED_THROW(throwable, fmt, ...) \
+    hdfs::internal::ThrowException<throwable>(true, __FILE__, __LINE__, #throwable, fmt, ##__VA_ARGS__)
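+
+/*
+ * Illustrative use (the pattern used throughout this patch):
+ *   THROW(HdfsIOException, "Cannot read file \"%s\", %s.",
+ *         path.c_str(), GetSystemErrorInfo(errno));
+ */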
+
+#endif /* _HDFS_LIBHDFS3_EXCEPTION_EXCEPTIONINTERNAL_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/FileWrapper.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/FileWrapper.h
new file mode 100644
index 0000000..98c301f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/FileWrapper.h
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_FILEWRAPPER_H_
+#define _HDFS_LIBHDFS3_COMMON_FILEWRAPPER_H_
+
+#include <cassert>
+#include <cstdio>
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+namespace hdfs {
+namespace internal {
+
+class FileWrapper {
+public:
+    virtual ~FileWrapper() {
+    }
+
+    virtual bool open(int fd, bool delegate) = 0;
+    virtual bool open(const std::string &path) = 0;
+    virtual void close() = 0;
+    virtual const char *read(std::vector<char> &buffer, int32_t size) = 0;
+    virtual void copy(char *buffer, int32_t size) = 0;
+    virtual void seek(int64_t position) = 0;
+};
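+
+/*
+ * Illustrative use of the stdio-backed implementation below (a sketch,
+ * not part of this patch):
+ *   CFileWrapper file;
+ *   if (file.open("/path/to/blockfile")) {
+ *       std::vector<char> buffer;
+ *       const char *data = file.read(buffer, 128);
+ *   }
+ */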
+
+class CFileWrapper: public FileWrapper {
+public:
+    CFileWrapper();
+    ~CFileWrapper();
+    bool open(int fd, bool delegate);
+    bool open(const std::string &path);
+    void close();
+    const char *read(std::vector<char> &buffer, int32_t size);
+    void copy(char *buffer, int32_t size);
+    void seek(int64_t offset);
+
+private:
+    FILE *file;
+    std::string path;
+};
+
+class MappedFileWrapper: public FileWrapper {
+public:
+    MappedFileWrapper();
+    ~MappedFileWrapper();
+    bool open(int fd, bool delegate);
+    bool open(const std::string &path);
+    void close();
+    const char *read(std::vector<char> &buffer, int32_t size);
+    void copy(char *buffer, int32_t size);
+    void seek(int64_t offset);
+
+private:
+    bool openInternal(int fd, bool delegate, size_t size);
+
+private:
+    bool delegate;
+    const char *begin;
+    const char *position;
+    int fd;
+    int64_t size;
+    std::string path;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_FILEWRAPPER_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Function.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Function.h
new file mode 100644
index 0000000..124b64c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Function.h
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_FUNCTION_H_
+#define _HDFS_LIBHDFS3_COMMON_FUNCTION_H_
+
+#include "platform.h"
+
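+/*
+ * Portability shim: function and bind resolve to the Boost versions when
+ * the toolchain lacks C++11 <functional> (NEED_BOOST) and to the std::
+ * versions otherwise. Illustrative use with a hypothetical callback (not
+ * part of this patch):
+ *   hdfs::function<bool(void)> cb = hdfs::bind(&Client::isCanceled, &client);
+ */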
+#ifdef NEED_BOOST
+#include <boost/function.hpp>
+#include <boost/bind.hpp>
+
+namespace hdfs {
+
+using boost::function;
+using boost::bind;
+using boost::reference_wrapper;
+
+}
+
+#else
+
+#include <functional>
+
+namespace hdfs {
+
+using std::function;
+using std::bind;
+using std::reference_wrapper;
+using namespace std::placeholders;
+
+}
+
+#endif
+
+#endif /* _HDFS_LIBHDFS3_COMMON_FUNCTION_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/HWCrc32c.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/HWCrc32c.cc
new file mode 100644
index 0000000..a4542fa
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/HWCrc32c.cc
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cassert>
+#include <cstdlib>
+
+#include "HWCrc32c.h"
+
+#if ((defined(__X86__) || defined(__i386__) || defined(i386) || \
+      defined(_M_IX86) || defined(__386__) || defined(__x86_64__) || \
+      defined(_M_X64)))
+#include <cpuid.h>
+#endif
+
+#if ((defined(__X86__) || defined(__i386__) || defined(i386) || \
+      defined(_M_IX86) || defined(__386__) || defined(__x86_64__) || \
+      defined(_M_X64)))
+#if !defined(__SSE4_2__)
+
+namespace hdfs {
+namespace internal {
+
+#if defined(__LP64__)
+static inline uint64_t _mm_crc32_u64(uint64_t crc, uint64_t value) {
+    asm("crc32q %[value], %[crc]\n" : [crc] "+r"(crc) : [value] "rm"(value));
+    return crc;
+}
+#endif
+
+static inline uint32_t _mm_crc32_u16(uint32_t crc, uint16_t value) {
+    asm("crc32w %[value], %[crc]\n" : [crc] "+r"(crc) : [value] "rm"(value));
+    return crc;
+}
+
+static inline uint32_t _mm_crc32_u32(uint32_t crc, uint32_t value) {
+    asm("crc32l %[value], %[crc]\n" : [crc] "+r"(crc) : [value] "rm"(value));
+    return crc;
+}
+
+static inline uint32_t _mm_crc32_u8(uint32_t crc, uint8_t value) {
+    asm("crc32b %[value], %[crc]\n" : [crc] "+r"(crc) : [value] "rm"(value));
+    return crc;
+}
+
+}
+}
+
+#else
+
+#include <nmmintrin.h>
+
+#endif
+
+namespace hdfs {
+namespace internal {
+
+bool HWCrc32c::available() {
+#if ((defined(__X86__) || defined(__i386__) || defined(i386) || defined(_M_IX86) || defined(__386__) || defined(__x86_64__) || defined(_M_X64)))
+    uint32_t eax, ebx, ecx = 0, edx;
+    /*
+     * get the CPU features (level 1). ecx will have the SSE4.2 bit.
+     * This gcc routine automatically handles saving ebx in the case where we are -fpic or -fPIC
+     */
+    __get_cpuid(1, &eax, &ebx, &ecx, &edx);
+    return (ecx & (1 << 20)) != 0;
+#else
+    return false;
+#endif
+}
+
+void HWCrc32c::update(const void * b, int len) {
+    const char * p = static_cast<const char *>(b);
+#if defined(__LP64__)
+    const size_t bytes = sizeof(uint64_t);
+#else
+    const size_t bytes = sizeof(uint32_t);
+#endif
+    int align = bytes - reinterpret_cast<uintptr_t>(p) % bytes;
+    align = bytes == static_cast<size_t>(align) ? 0 : align;
+
+    if (len < align) {
+        align = len;
+    }
+
+    updateInt64(p, align);
+    p = p + align;
+    len -= align;
+
+    if (len > 0) {
+        assert(0 == reinterpret_cast<uintptr_t>(p) % bytes);
+
+        for (int i = len / bytes; i > 0; --i) {
+#if defined(__LP64__)
+            crc = _mm_crc32_u64(crc, *reinterpret_cast<const uint64_t *>(p));
+#else
+            crc = _mm_crc32_u32(crc, *reinterpret_cast<const uint32_t *>(p));
+#endif
+            p = p + bytes;
+        }
+
+        len &= bytes - 1;
+        updateInt64(p, len);
+    }
+}
+
+void HWCrc32c::updateInt64(const char * b, int len) {
+    assert(len < 8);
+
+    /* The cases below intentionally fall through so that each remaining
+     * length is consumed with the widest CRC32 instructions available. */
+    switch (len) {
+    case 7:
+        crc = _mm_crc32_u8(crc, *reinterpret_cast<const uint8_t *>(b++));
+        /* fall through: 6 bytes remain */
+    case 6:
+        crc = _mm_crc32_u16(crc, *reinterpret_cast<const uint16_t *>(b));
+        b += 2;
+        /* fall through: 4 bytes remain (case 5 is below: 4 + 1) */
+    case 4:
+        crc = _mm_crc32_u32(crc, *reinterpret_cast<const uint32_t *>(b));
+        break;
+
+    case 3:
+        crc = _mm_crc32_u8(crc, *reinterpret_cast<const uint8_t *>(b++));
+        /* fall through: 2 bytes remain */
+    case 2:
+        crc = _mm_crc32_u16(crc, *reinterpret_cast<const uint16_t *>(b));
+        break;
+
+    case 5:
+        crc = _mm_crc32_u32(crc, *reinterpret_cast<const uint32_t *>(b));
+        b += 4;
+        /* fall through: 1 byte remains */
+    case 1:
+        crc = _mm_crc32_u8(crc, *reinterpret_cast<const uint8_t *>(b));
+        break;
+
+    case 0:
+        break;
+    }
+}
+
+}
+}
+
+#endif /* x86 */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/HWCrc32c.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/HWCrc32c.h
new file mode 100644
index 0000000..d442a5a0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/HWCrc32c.h
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_HWCHECKSUM_H_
+#define _HDFS_LIBHDFS3_COMMON_HWCHECKSUM_H_
+
+#include "Checksum.h"
+
+namespace hdfs {
+namespace internal {
+
+/**
+ * Calculate CRC with hardware support.
+ */
+class HWCrc32c: public Checksum {
+public:
+    /**
+     * Constructor.
+     */
+    HWCrc32c() :
+        crc(0xFFFFFFFF) {
+    }
+
+    uint32_t getValue() {
+        return ~crc;
+    }
+
+    /**
+     * @ref Checksum#reset()
+     */
+    void reset() {
+        crc = 0xFFFFFFFF;
+    }
+
+    /**
+     * @ref Checksum#update(const void *, int)
+     */
+    void update(const void *b, int len);
+
+    /**
+     * Destroy an HWCrc32c instance.
+     */
+    ~HWCrc32c() {
+    }
+
+    /**
+     * Test whether the CPU supports the CRC32 instructions.
+     * @return true if the hardware can calculate the CRC.
+     */
+    static bool available();
+
+private:
+    void updateInt64(const char *b, int len);
+
+private:
+    uint32_t crc;
+};
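+
+/*
+ * Illustrative use (not part of this patch); callers should check
+ * availability before constructing an instance:
+ *   if (HWCrc32c::available()) {
+ *       HWCrc32c crc;
+ *       crc.update(buffer, length);
+ *       uint32_t sum = crc.getValue();
+ *   }
+ */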
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_HWCHECKSUM_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Hash.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Hash.cc
new file mode 100644
index 0000000..7ca8447
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Hash.cc
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Hash.h"
+
+#ifdef NEED_BOOST
+
+#include <boost/functional/hash.hpp>
+
+namespace hdfs {
+namespace internal {
+
+/**
+ * A hash function object used to hash a boolean value.
+ */
+boost::hash<bool> BoolHasher;
+
+/**
+ * A hash function object used to hash an int value.
+ */
+boost::hash<int> Int32Hasher;
+
+/**
+ * A hash function object used to hash a 64-bit int value.
+ */
+boost::hash<int64_t> Int64Hasher;
+
+/**
+ * A hash function object used to hash a size_t value.
+ */
+boost::hash<size_t> SizeHasher;
+
+/**
+ * A hash function object used to hash a std::string object.
+ */
+boost::hash<std::string> StringHasher;
+}
+}
+
+#else
+
+#include <functional>
+
+namespace hdfs {
+namespace internal {
+
+/**
+ * A hash function object used to hash a boolean value.
+ */
+std::hash<bool> BoolHasher;
+
+/**
+ * A hash function object used to hash an int value.
+ */
+std::hash<int> Int32Hasher;
+
+/**
+ * A hash function object used to hash a 64-bit int value.
+ */
+std::hash<int64_t> Int64Hasher;
+
+/**
+ * A hash function object used to hash a size_t value.
+ */
+std::hash<size_t> SizeHasher;
+
+/**
+ * A hash function object used to hash a std::string object.
+ */
+std::hash<std::string> StringHasher;
+
+}
+}
+
+#endif
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Hash.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Hash.h
new file mode 100644
index 0000000..079f676
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Hash.h
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_HASH_H_
+#define _HDFS_LIBHDFS3_COMMON_HASH_H_
+
+#include "platform.h"
+
+#include <string>
+#include <vector>
+
+#ifdef NEED_BOOST
+
+#include <boost/functional/hash.hpp>
+
+namespace hdfs {
+namespace internal {
+
+/**
+ * A hash function object used to hash a boolean value.
+ */
+extern boost::hash<bool> BoolHasher;
+
+/**
+ * A hash function object used to hash an int value.
+ */
+extern boost::hash<int> Int32Hasher;
+
+/**
+ * A hash function object used to hash a 64 bit int value.
+ */
+extern boost::hash<int64_t> Int64Hasher;
+
+/**
+ * A hash function object used to hash a size_t value.
+ */
+extern boost::hash<size_t> SizeHasher;
+
+/**
+ * A hash function object used to hash a std::string object.
+ */
+extern boost::hash<std::string> StringHasher;
+
+}
+}
+
+#define HDFS_HASH_DEFINE(TYPE) \
+    namespace boost{ \
+    template<> \
+    struct hash<TYPE> { \
+        std::size_t operator()(const TYPE & key) const { \
+            return key.hash_value(); \
+        } \
+    }; \
+    }
+
+#else
+
+#include <functional>
+
+namespace hdfs {
+namespace internal {
+
+/**
+ * A hash function object used to hash a boolean value.
+ */
+extern std::hash<bool> BoolHasher;
+
+/**
+ * A hash function object used to hash an int value.
+ */
+extern std::hash<int> Int32Hasher;
+
+/**
+ * A hash function object used to hash a 64 bit int value.
+ */
+extern std::hash<int64_t> Int64Hasher;
+
+/**
+ * A hash function object used to hash a size_t value.
+ */
+extern std::hash<size_t> SizeHasher;
+
+/**
+ * A hash function object used to hash a std::string object.
+ */
+extern std::hash<std::string> StringHasher;
+
+}
+}
+
+#define HDFS_HASH_DEFINE(TYPE) \
+    namespace std{ \
+    template<> \
+    struct hash<TYPE> { \
+        std::size_t operator()(const TYPE & key) const { \
+            return key.hash_value(); \
+        } \
+    }; \
+    }
+
+#endif
+
+namespace hdfs {
+namespace internal {
+
+/**
+ * Combine an array of size_t values into a single hash value.
+ * @param vec Pointer to the values to be hashed.
+ * @param size The number of values in vec.
+ * @return The hash value.
+ * @throw nothrow
+ */
+static inline size_t CombineHasher(const size_t *vec, size_t size) {
+    size_t value = 0;
+
+    for (size_t i = 0; i < size; ++i) {
+        value ^= SizeHasher(vec[i]) << 1;
+    }
+
+    return value;
+}
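+
+// An illustrative sketch of how the hashers above compose; blockId and
+// datanode are hypothetical inputs, not names from this patch:
+//
+//   size_t h[2];
+//   h[0] = Int64Hasher(blockId);    // int64_t
+//   h[1] = StringHasher(datanode);  // std::string
+//   size_t combined = CombineHasher(h, 2);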
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_HASH_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Logger.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Logger.cc
new file mode 100644
index 0000000..d4c885e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Logger.cc
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "platform.h"
+
+#include "Logger.h"
+
+#include <cassert>
+#include <cstdarg>
+#include <cstdio>
+#include <cstring>
+#include <sstream>
+#include <sys/time.h>
+#include <unistd.h>
+#include <vector>
+
+#include "DateTime.h"
+#include "Thread.h"
+
+namespace hdfs {
+namespace internal {
+
+Logger RootLogger;
+
+static mutex LoggerMutex;
+static THREAD_LOCAL char ProcessId[64];
+
+const char * const SeverityName[] = {
+  "FATAL", "ERROR", "WARNING", "INFO", "DEBUG1", "DEBUG2", "DEBUG3"
+};
+
+static void InitProcessId(char *p, size_t p_len) {
+    std::stringstream ss;
+    ss << "p" << getpid() << ", th" << pthread_self();
+    snprintf(p, p_len, "%s", ss.str().c_str());
+}
+
+Logger::Logger() :
+    fd(STDERR_FILENO), severity(DEFAULT_LOG_LEVEL) {
+}
+
+Logger::~Logger() {
+}
+
+void Logger::setOutputFd(int f) {
+    fd = f;
+}
+
+void Logger::setLogSeverity(LogSeverity l) {
+    severity = l;
+}
+
+void Logger::printf(LogSeverity s, const char *fmt, ...) {
+    va_list ap;
+
+    if (s > severity || fd < 0) {
+        return;
+    }
+
+    try {
+        if (ProcessId[0] == '\0') {
+            InitProcessId(ProcessId, sizeof(ProcessId));
+        }
+        std::vector<char> buffer;
+        struct tm tm_time;
+        struct timeval tval;
+        memset(&tval, 0, sizeof(tval));
+        gettimeofday(&tval, NULL);
+        localtime_r(&tval.tv_sec, &tm_time);
+        // determine the buffer size needed for the formatted message;
+        // the buffer is still empty here, so probe with a NULL destination
+        va_start(ap, fmt);
+        int size = vsnprintf(NULL, 0, fmt, ap);
+        va_end(ap);
+        // reserve an extra 100 bytes for the timestamp/severity prefix
+        buffer.resize(size + 100);
+        size = snprintf(&buffer[0], buffer.size(), "%04d-%02d-%02d %02d:%02d:%02d.%06ld, %s, %s ", tm_time.tm_year + 1900,
+                        1 + tm_time.tm_mon, tm_time.tm_mday, tm_time.tm_hour,
+                        tm_time.tm_min, tm_time.tm_sec, static_cast<long>(tval.tv_usec), ProcessId, SeverityName[s]);
+        va_start(ap, fmt);
+        size += vsnprintf(&buffer[size], buffer.size() - size, fmt, ap);
+        va_end(ap);
+        lock_guard<mutex> lock(LoggerMutex);
+        dprintf(fd, "%s\n", &buffer[0]);
+        return;
+    } catch (const std::exception &e) {
+        dprintf(fd, "%s:%d %s %s", __FILE__, __LINE__,
+                "FATAL: get an unexpected exception:", e.what());
+        throw;
+    }
+}
+
+}
+}
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Logger.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Logger.h
new file mode 100644
index 0000000..33c1a80
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Logger.h
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_LOGGER_H_
+#define _HDFS_LIBHDFS3_COMMON_LOGGER_H_
+
+#define DEFAULT_LOG_LEVEL INFO
+
+namespace hdfs {
+namespace internal {
+
+extern const char * const SeverityName[];
+
+enum LogSeverity {
+    FATAL, LOG_ERROR, WARNING, INFO, DEBUG1, DEBUG2, DEBUG3, NUM_SEVERITIES
+};
+
+class Logger {
+public:
+    Logger();
+
+    ~Logger();
+
+    void setOutputFd(int f);
+
+    void setLogSeverity(LogSeverity l);
+
+    void printf(LogSeverity s, const char * fmt, ...)
+      __attribute__((format(printf, 3, 4)));
+
+private:
+    int fd;
+    LogSeverity severity;
+};
+
+extern Logger RootLogger;
+
+}
+}
+
+#define LOG(s, fmt, ...) \
+    hdfs::internal::RootLogger.printf(s, fmt, ##__VA_ARGS__)
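+
+// Illustrative examples (the message text and arguments are hypothetical):
+//
+//   LOG(hdfs::internal::INFO, "connected to namenode in %d ms", elapsed);
+//   LOG(hdfs::internal::LOG_ERROR, "read failed: %s", e.what());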
+
+#endif /* _HDFS_LIBHDFS3_COMMON_LOGGER_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/LruMap.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/LruMap.h
new file mode 100644
index 0000000..a434aaf
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/LruMap.h
@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_LRUMAP_H_
+#define _HDFS_LIBHDFS3_COMMON_LRUMAP_H_
+
+#include "Thread.h"
+#include "Unordered.h"
+
+#include <list>
+
+namespace hdfs {
+namespace internal {
+
+template<typename K, typename V>
+class LruMap {
+public:
+    typedef K KeyType;
+    typedef V ValueType;
+    typedef std::pair<K, V> ItemType;
+    typedef std::list<ItemType> ListType;
+    typedef unordered_map<K, typename ListType::iterator> MapType;
+
+public:
+    LruMap() :
+        count(0), size(1000) {
+    }
+
+    LruMap(size_t size) :
+        count(0), size(size) {
+    }
+
+    ~LruMap() {
+        lock_guard<mutex> lock(mut);
+        map.clear();
+        list.clear();
+    }
+
+    void resize(size_t s) {
+        lock_guard<mutex> lock(mut);
+        size = s;
+
+        while (count > s) {
+            map.erase(list.back().first);
+            list.pop_back();
+            --count;
+        }
+    }
+
+    void insert(const KeyType & key, const ValueType & value) {
+        lock_guard<mutex> lock(mut);
+        typename MapType::iterator it = map.find(key);
+
+        if (it != map.end()) {
+            --count;
+            list.erase(it->second);
+        }
+
+        list.push_front(std::make_pair(key, value));
+        map[key] = list.begin();
+        ++count;
+
+        if (count > size) {
+            map.erase(list.back().first);
+            list.pop_back();
+        }
+    }
+
+    void erase(const KeyType & key) {
+        lock_guard<mutex> lock(mut);
+        typename MapType::iterator it = map.find(key);
+
+        if (it != map.end()) {
+            list.erase(it->second);
+            map.erase(it);
+            --count;
+        }
+    }
+
+    bool find(const KeyType & key, ValueType & value) {
+        lock_guard<mutex> lock(mut);
+        typename MapType::iterator it = map.find(key);
+
+        if (it != map.end()) {
+            list.push_front(*(it->second));
+            list.erase(it->second);
+            value = list.front().second;
+            map[key] = list.begin();
+            return true;
+        }
+
+        return false;
+    }
+
+private:
+    size_t count;
+    size_t size;
+    ListType list;
+    MapType map;
+    mutex mut;
+};
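+
+// Usage sketch (illustrative; the key and value types are examples only).
+// Every operation takes the internal mutex, so one instance may be shared
+// between threads:
+//
+//   LruMap<std::string, int64_t> cache(100);
+//   cache.insert("block-1", 1024);
+//   int64_t v;
+//   if (cache.find("block-1", v)) {
+//       // hit: the entry was moved to the front of the LRU list
+//   }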
+
+}
+}
+#endif /* _HDFS_LIBHDFS3_COMMON_LRU_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/MappedFileWrapper.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/MappedFileWrapper.cc
new file mode 100644
index 0000000..18fe995
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/MappedFileWrapper.cc
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cassert>
+#include <cstring>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits>
+#include <sstream>
+#include <string>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "FileWrapper.h"
+
+namespace hdfs {
+namespace internal {
+
+MappedFileWrapper::MappedFileWrapper() :
+    delegate(true), begin(NULL), position(NULL), fd(-1), size(0) {
+}
+
+MappedFileWrapper::~MappedFileWrapper() {
+    close();
+}
+
+bool MappedFileWrapper::openInternal(int fd, bool delegate, size_t size) {
+    this->delegate = delegate;
+    void *retval = mmap(NULL, size, PROT_READ, MAP_FILE | MAP_PRIVATE, fd, 0);
+    begin = position = static_cast<const char *>(retval);
+
+    if (MAP_FAILED == retval) {
+        begin = position = NULL;
+        close();
+        return false;
+    }
+
+    if (posix_madvise(const_cast<char *>(begin), size, POSIX_MADV_SEQUENTIAL)) {
+        close();
+        return false;
+    }
+
+    return true;
+}
+
+bool MappedFileWrapper::open(int fd, bool delegate) {
+    size = lseek(fd, 0, SEEK_END);
+    lseek(fd, 0, SEEK_SET);
+    std::stringstream ss;
+    ss << "FileDescriptor " << fd;
+    path = ss.str();
+
+    if (static_cast<uint64_t>(size) >
+        static_cast<uint64_t>(std::numeric_limits<size_t>::max())) {
+        THROW(HdfsIOException,
+              "Cannot create memory mapped file for \"%s\", file is too large.",
+              path.c_str());
+    }
+
+    return openInternal(fd, delegate, static_cast<size_t>(size));
+}
+
+bool MappedFileWrapper::open(const std::string &path) {
+    struct stat st;
+
+    if (stat(path.c_str(), &st)) {
+        return false;
+    }
+
+    size = st.st_size;
+
+    if (static_cast<uint64_t>(size) >
+        static_cast<uint64_t>(std::numeric_limits<size_t>::max())) {
+        THROW(HdfsIOException,
+              "Cannot create memory mapped file for \"%s\", file is too large.",
+              path.c_str());
+    }
+
+    fd = ::open(path.c_str(), O_RDONLY);
+
+    if (fd < 0) {
+        return false;
+    }
+
+    this->path = path;
+    return openInternal(fd, true, st.st_size);
+}
+
+void MappedFileWrapper::close() {
+    if (NULL != begin) {
+        ::munmap(const_cast<char *>(begin), static_cast<size_t>(size));
+        begin = position = NULL;
+    }
+    if (fd >= 0 && delegate) {
+        ::close(fd);
+    }
+
+    fd = -1;
+    size = 0;
+    delegate = true;
+    path.clear();
+}
+
+const char * MappedFileWrapper::read(std::vector<char> &buffer, int32_t size) {
+    assert(NULL != begin && NULL != position);
+    const char * retval = position;
+    position += size;
+    return retval;
+}
+
+void MappedFileWrapper::copy(char *buffer, int32_t size) {
+    assert(NULL != begin && NULL != position);
+    memcpy(buffer, position, size);
+    position += size;
+}
+
+void MappedFileWrapper::seek(int64_t offset) {
+    assert(NULL != begin && NULL != position);
+    position = begin + offset;
+}
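+
+// Usage sketch (illustrative; the path is hypothetical and error handling is
+// elided). Note that read() returns a pointer into the mapping itself, so
+// this implementation never touches the scratch buffer argument:
+//
+//   MappedFileWrapper file;
+//   if (file.open("/path/to/blockfile")) {
+//       std::vector<char> scratch;
+//       const char *p = file.read(scratch, 512); // zero-copy view of 512 bytes
+//   }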
+
+}
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SWCrc32c.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SWCrc32c.cc
new file mode 100644
index 0000000..bbba4a3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SWCrc32c.cc
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SWCrc32c.h"
+
+namespace hdfs {
+namespace internal {
+
+/*
+ * The following CRC lookup table was generated automagically
+ * using the following model parameters:
+ *
+ * Generator Polynomial = ................. 0x1EDC6F41
+ * Generator Polynomial Length = .......... 32 bits
+ * Reflected Bits = ....................... TRUE
+ * Table Generation Offset = .............. 32 bits
+ * Number of Slices = ..................... 8 slices
+ * Slice Lengths = ........................ 8 8 8 8 8 8 8 8
+ */
+
+static const uint32_t crc_tableil8_o32[256] = { 0x00000000, 0xF26B8303,
+                                                0xE13B70F7, 0x1350F3F4, 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
+                                                0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3,
+                                                0xAC78BF27, 0x5E133C24, 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
+                                                0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, 0x9A879FA0, 0x68EC1CA3,
+                                                0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
+                                                0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, 0xE72719C1, 0x154C9AC2,
+                                                0x061C6936, 0xF477EA35, 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
+                                                0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, 0x30E349B1, 0xC288CAB2,
+                                                0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
+                                                0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, 0x7DA08661, 0x8FCB0562,
+                                                0x9C9BF696, 0x6EF07595, 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
+                                                0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, 0xCBA24573, 0x39C9C670,
+                                                0x2A993584, 0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
+                                                0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC, 0x64D4CECF,
+                                                0x77843D3B, 0x85EFBE38, 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
+                                                0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, 0x61C69362, 0x93AD1061,
+                                                0x80FDE395, 0x72966096, 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
+                                                0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1,
+                                                0xCDBE2C45, 0x3FD5AF46, 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
+                                                0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, 0xFB410CC2, 0x092A8FC1,
+                                                0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
+                                                0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64,
+                                                0xA457DC90, 0x563C5F93, 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
+                                                0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, 0x92A8FC17, 0x60C37F14,
+                                                0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
+                                                0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, 0xDFEB33C7, 0x2D80B0C4,
+                                                0x3ED04330, 0xCCBBC033, 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
+                                                0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, 0x2892ED69, 0xDAF96E6A,
+                                                0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
+                                                0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, 0x758FE5D6, 0x87E466D5,
+                                                0x94B49521, 0x66DF1622, 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
+                                                0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, 0xC38D26C4, 0x31E6A5C7,
+                                                0x22B65633, 0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
+                                                0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914, 0x7CA56A17,
+                                                0x6FF599E3, 0x9D9E1AE0, 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
+                                                0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, 0x590AB964, 0xAB613A67,
+                                                0xB831C993, 0x4A5A4A90, 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
+                                                0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06,
+                                                0xC5914FF2, 0x37FACCF1, 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
+                                                0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E, 0xF36E6F75, 0x0105EC76,
+                                                0x12551F82, 0xE03E9C81, 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
+                                                0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6,
+                                                0x5F16D052, 0xAD7D5351
+                                              };
+
+/*
+ * end of the CRC lookup table crc_tableil8_o32
+ */
+
+void SWCrc32c::update(const void * b, int len) {
+    const char * p = static_cast<const char *>(b);
+    const char * e = p + len;
+
+    while (p < e) {
+        crc = crc_tableil8_o32[(crc ^ *p++) & 0x000000FF] ^ (crc >> 8);
+    }
+}
+
+}
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SWCrc32c.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SWCrc32c.h
new file mode 100644
index 0000000..8d21064
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SWCrc32c.h
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_SWCRC32C_H_
+#define _HDFS_LIBHDFS3_COMMON_SWCRC32C_H_
+
+#include "Checksum.h"
+#include "platform.h"
+
+#include <stdint.h>
+
+namespace hdfs {
+namespace internal {
+
+class SWCrc32c: public Checksum {
+public:
+    SWCrc32c() :
+        crc(0xFFFFFFFF) {
+    }
+
+    uint32_t getValue() {
+        return ~crc;
+    }
+
+    void reset() {
+        crc = 0xFFFFFFFF;
+    }
+
+    void update(const void *b, int len);
+
+    ~SWCrc32c() {
+    }
+
+private:
+    uint32_t crc;
+};
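+
+// Usage sketch (illustrative): checksum a buffer and read the finalized value.
+//
+//   SWCrc32c cs;
+//   cs.update(data, len);
+//   uint32_t sum = cs.getValue(); // finalized CRC32C; call reset() to reuse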
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_SWCRC32C_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.cc
new file mode 100644
index 0000000..0611d67
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.cc
@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SessionConfig.h"
+
+#include <sstream>
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Function.h"
+
+#define ARRAYSIZE(A) (sizeof(A) / sizeof(A[0]))
+
+namespace hdfs {
+namespace internal {
+
+template<typename T>
+static void CheckRangeGE(const char *key, T const & value, T const & target) {
+    if (!(value >= target)) {
+        std::stringstream ss;
+        ss << "Invalid configure item: \"" << key << "\", value: " << value
+           << ", expected value should be larger than " << target;
+        THROW(HdfsConfigInvalid, "%s", ss.str().c_str());
+    }
+}
+
+template<typename T>
+static void CheckMultipleOf(const char *key, const T & value, int unit) {
+    if (value <= 0 || value % unit != 0) {
+        THROW(HdfsConfigInvalid, "%s should be larger than 0 and a multiple of %d.", key, unit);
+    }
+}
+
+SessionConfig::SessionConfig(const Config & conf) {
+    ConfigDefault<bool> boolValues [] = {
+        {
+            &rpcTcpNoDelay, "rpc.client.connect.tcpnodelay", true
+        }, {
+            &readFromLocal, "dfs.client.read.shortcircuit", true
+        }, {
+            &addDatanode, "output.replace-datanode-on-failure", true
+        }, {
+            &notRetryAnotherNode, "input.notretry-another-node", false
+        }, {
+            &useMappedFile, "input.localread.mappedfile", true
+        }
+    };
+    ConfigDefault<int32_t> i32Values[] = {
+        {
+            &rpcMaxIdleTime, "rpc.client.max.idle", 10 * 1000, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &rpcPingTimeout, "rpc.client.ping.interval", 10 * 1000
+        }, {
+            &rpcConnectTimeout, "rpc.client.connect.timeout", 600 * 1000
+        }, {
+            &rpcReadTimeout, "rpc.client.read.timeout", 3600 * 1000
+        }, {
+            &rpcWriteTimeout, "rpc.client.write.timeout", 3600 * 1000
+        }, {
+            &rpcSocketLingerTimeout, "rpc.client.socket.linger.timeout", -1
+        }, {
+            &rpcMaxRetryOnConnect, "rpc.client.connect.retry", 10, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &rpcTimeout, "rpc.client.timeout", 3600 * 1000
+        }, {
+            &defaultReplica, "dfs.default.replica", 3, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &inputConnTimeout, "input.connect.timeout", 600 * 1000
+        }, {
+            &inputReadTimeout, "input.read.timeout", 3600 * 1000
+        }, {
+            &inputWriteTimeout, "input.write.timeout", 3600 * 1000
+        }, {
+            &localReadBufferSize, "input.localread.default.buffersize", 1 * 1024 * 1024, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &prefetchSize, "dfs.prefetchsize", 10, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &maxGetBlockInfoRetry, "input.read.getblockinfo.retry", 3, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &maxLocalBlockInfoCacheSize, "input.localread.blockinfo.cachesize", 1000, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &maxReadBlockRetry, "input.read.max.retry", 60, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &chunkSize, "output.default.chunksize", 512, bind(CheckMultipleOf<int32_t>, _1, _2, 512)
+        }, {
+            &packetSize, "output.default.packetsize", 64 * 1024
+        }, {
+            &blockWriteRetry, "output.default.write.retry", 10, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &outputConnTimeout, "output.connect.timeout", 600 * 1000
+        }, {
+            &outputReadTimeout, "output.read.timeout", 3600 * 1000
+        }, {
+            &outputWriteTimeout, "output.write.timeout", 3600 * 1000
+        }, {
+            &closeFileTimeout, "output.close.timeout", 3600 * 1000
+        }, {
+            &packetPoolSize, "output.packetpool.size", 1024
+        }, {
+            &heartBeatInterval, "output.heartbeat.interval", 10 * 1000
+        }, {
+            &rpcMaxHARetry, "dfs.client.failover.max.attempts", 15, bind(CheckRangeGE<int32_t>, _1, _2, 0)
+        }
+    };
+    ConfigDefault<int64_t> i64Values [] = {
+        {
+            &defaultBlockSize, "dfs.default.blocksize", 64 * 1024 * 1024, bind(CheckMultipleOf<int64_t>, _1, _2, 512)
+        }
+    };
+    ConfigDefault<std::string> strValues [] = {
+        {&defaultUri, "dfs.default.uri", "hdfs://localhost:9000" },
+        {&rpcAuthMethod, "hadoop.security.authentication", "simple" },
+        {&kerberosCachePath, "hadoop.security.kerberos.ticket.cache.path", "" },
+        {&logSeverity, "dfs.client.log.severity", "INFO" }
+    };
+
+    for (size_t i = 0; i < ARRAYSIZE(boolValues); ++i) {
+        *boolValues[i].variable = conf.getBool(boolValues[i].key,
+                                               boolValues[i].value);
+
+        if (boolValues[i].check) {
+            boolValues[i].check(boolValues[i].key, *boolValues[i].variable);
+        }
+    }
+
+    for (size_t i = 0; i < ARRAYSIZE(i32Values); ++i) {
+        *i32Values[i].variable = conf.getInt32(i32Values[i].key,
+                                               i32Values[i].value);
+
+        if (i32Values[i].check) {
+            i32Values[i].check(i32Values[i].key, *i32Values[i].variable);
+        }
+    }
+
+    for (size_t i = 0; i < ARRAYSIZE(i64Values); ++i) {
+        *i64Values[i].variable = conf.getInt64(i64Values[i].key,
+                                               i64Values[i].value);
+
+        if (i64Values[i].check) {
+            i64Values[i].check(i64Values[i].key, *i64Values[i].variable);
+        }
+    }
+
+    for (size_t i = 0; i < ARRAYSIZE(strValues); ++i) {
+        *strValues[i].variable = conf.getString(strValues[i].key,
+                                                strValues[i].value);
+
+        if (strValues[i].check) {
+            strValues[i].check(strValues[i].key, *strValues[i].variable);
+        }
+    }
+}
+
+}
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.h
new file mode 100644
index 0000000..9d9462d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.h
@@ -0,0 +1,324 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_SESSIONCONFIG_H_
+#define _HDFS_LIBHDFS3_COMMON_SESSIONCONFIG_H_
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Function.h"
+#include "Logger.h"
+#include "XmlConfig.h"
+
+#include <cassert>
+#include <stdint.h>
+#include <vector>
+
+namespace hdfs {
+namespace internal {
+
+template<typename T>
+struct ConfigDefault {
+    T *variable; // variable this configuration item is bound to.
+    const char *key; // configuration key.
+    T value; // default value.
+    function<void(const char *, T const &)> check; // function used to validate the value.
+};
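+
+// For example, this entry from SessionConfig.cc binds the
+// "rpc.client.connect.retry" key to rpcMaxRetryOnConnect with a default of 10
+// and a validator that requires a value of at least 1:
+//
+//   { &rpcMaxRetryOnConnect, "rpc.client.connect.retry", 10,
+//     bind(CheckRangeGE<int32_t>, _1, _2, 1) }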
+
+class SessionConfig {
+public:
+
+    SessionConfig(const Config &conf);
+
+    /*
+     * rpc configuration
+     */
+
+    int32_t getRpcConnectTimeout() const {
+        return rpcConnectTimeout;
+    }
+
+    int32_t getRpcMaxIdleTime() const {
+        return rpcMaxIdleTime;
+    }
+
+    int32_t getRpcMaxRetryOnConnect() const {
+        return rpcMaxRetryOnConnect;
+    }
+
+    int32_t getRpcPingTimeout() const {
+        return rpcPingTimeout;
+    }
+
+    int32_t getRpcReadTimeout() const {
+        return rpcReadTimeout;
+    }
+
+    bool isRpcTcpNoDelay() const {
+        return rpcTcpNoDelay;
+    }
+
+    int32_t getRpcWriteTimeout() const {
+        return rpcWriteTimeout;
+    }
+
+    /*
+     * FileSystem configuration
+     */
+    const std::string &getDefaultUri() const {
+        return defaultUri;
+    }
+
+    int32_t getDefaultReplica() const {
+        return defaultReplica;
+    }
+
+    int64_t getDefaultBlockSize() const {
+        return defaultBlockSize;
+    }
+
+    /*
+     * InputStream configuration
+     */
+    int32_t getLocalReadBufferSize() const {
+        return localReadBufferSize;
+    }
+
+    int32_t getInputReadTimeout() const {
+        return inputReadTimeout;
+    }
+
+    int32_t getInputWriteTimeout() const {
+        return inputWriteTimeout;
+    }
+
+    int32_t getInputConnTimeout() const {
+        return inputConnTimeout;
+    }
+
+    int32_t getPrefetchSize() const {
+        return prefetchSize;
+    }
+
+    bool isReadFromLocal() const {
+        return readFromLocal;
+    }
+
+    int32_t getMaxGetBlockInfoRetry() const {
+        return maxGetBlockInfoRetry;
+    }
+
+    int32_t getMaxLocalBlockInfoCacheSize() const {
+        return maxLocalBlockInfoCacheSize;
+    }
+
+    /*
+     * OutputStream configuration
+     */
+    int32_t getDefaultChunkSize() const {
+        return chunkSize;
+    }
+
+    int32_t getDefaultPacketSize() const {
+        if (packetSize % chunkSize != 0) {
+            THROW(HdfsConfigInvalid,
+                  "output.default.packetsize should be larger than 0 "
+                  "and be the multiple of output.default.chunksize.");
+        }
+
+        return packetSize;
+    }
+
+    int32_t getBlockWriteRetry() const {
+        return blockWriteRetry;
+    }
+
+    int32_t getOutputConnTimeout() const {
+        return outputConnTimeout;
+    }
+
+    int32_t getOutputReadTimeout() const {
+        return outputReadTimeout;
+    }
+
+    int32_t getOutputWriteTimeout() const {
+        return outputWriteTimeout;
+    }
+
+    bool canAddDatanode() const {
+        return addDatanode;
+    }
+
+    int32_t getHeartBeatInterval() const {
+        return heartBeatInterval;
+    }
+
+    int32_t getRpcMaxHaRetry() const {
+        return rpcMaxHARetry;
+    }
+
+    void setRpcMaxHaRetry(int32_t rpcMaxHaRetry) {
+        rpcMaxHARetry = rpcMaxHaRetry;
+    }
+
+    const std::string &getRpcAuthMethod() const {
+        return rpcAuthMethod;
+    }
+
+    void setRpcAuthMethod(const std::string &rpcAuthMethod) {
+        this->rpcAuthMethod = rpcAuthMethod;
+    }
+
+    const std::string &getKerberosCachePath() const {
+        return kerberosCachePath;
+    }
+
+    void setKerberosCachePath(const std::string &kerberosCachePath) {
+        this->kerberosCachePath = kerberosCachePath;
+    }
+
+    int32_t getRpcSocketLingerTimeout() const {
+        return rpcSocketLingerTimeout;
+    }
+
+    void setRpcSocketLingerTimeout(int32_t rpcSocketLingerTimeout) {
+        this->rpcSocketLingerTimeout = rpcSocketLingerTimeout;
+    }
+
+    LogSeverity getLogSeverity() const {
+        for (size_t i = FATAL; i < NUM_SEVERITIES; ++i) {
+            if (logSeverity == SeverityName[i]) {
+                return static_cast<LogSeverity>(i);
+            }
+        }
+
+        return DEFAULT_LOG_LEVEL;
+    }
+
+    void setLogSeverity(const std::string &logSeverityLevel) {
+        this->logSeverity = logSeverityLevel;
+    }
+
+    int32_t getPacketPoolSize() const {
+        return packetPoolSize;
+    }
+
+    void setPacketPoolSize(int32_t packetPoolSize) {
+        this->packetPoolSize = packetPoolSize;
+    }
+
+    int32_t getCloseFileTimeout() const {
+        return closeFileTimeout;
+    }
+
+    void setCloseFileTimeout(int32_t closeFileTimeout) {
+        this->closeFileTimeout = closeFileTimeout;
+    }
+
+    int32_t getRpcTimeout() const {
+        return rpcTimeout;
+    }
+
+    void setRpcTimeout(int32_t rpcTimeout) {
+        this->rpcTimeout = rpcTimeout;
+    }
+
+    bool doesNotRetryAnotherNode() const {
+        return notRetryAnotherNode;
+    }
+
+    void setIFNotRetryAnotherNode(bool notRetryAnotherNode) {
+        this->notRetryAnotherNode = notRetryAnotherNode;
+    }
+
+    int32_t getMaxReadBlockRetry() const {
+        return maxReadBlockRetry;
+    }
+
+    void setMaxReadBlockRetry(int32_t maxReadBlockRetry) {
+        this->maxReadBlockRetry = maxReadBlockRetry;
+    }
+
+    bool doUseMappedFile() const {
+        return useMappedFile;
+    }
+
+    void setUseMappedFile(bool useMappedFile) {
+        this->useMappedFile = useMappedFile;
+    }
+
+public:
+    /*
+     * rpc configuration
+     */
+    int32_t rpcMaxIdleTime;
+    int32_t rpcPingTimeout;
+    int32_t rpcConnectTimeout;
+    int32_t rpcReadTimeout;
+    int32_t rpcWriteTimeout;
+    int32_t rpcMaxRetryOnConnect;
+    int32_t rpcMaxHARetry;
+    int32_t rpcSocketLingerTimeout;
+    int32_t rpcTimeout;
+    bool rpcTcpNoDelay;
+    std::string rpcAuthMethod;
+
+    /*
+     * FileSystem configuration
+     */
+    std::string defaultUri;
+    std::string kerberosCachePath;
+    std::string logSeverity;
+    int32_t defaultReplica;
+    int64_t defaultBlockSize;
+
+    /*
+     * InputStream configuration
+     */
+    bool useMappedFile;
+    bool readFromLocal;
+    bool notRetryAnotherNode;
+    int32_t inputConnTimeout;
+    int32_t inputReadTimeout;
+    int32_t inputWriteTimeout;
+    int32_t localReadBufferSize;
+    int32_t maxGetBlockInfoRetry;
+    int32_t maxLocalBlockInfoCacheSize;
+    int32_t maxReadBlockRetry;
+    int32_t prefetchSize;
+
+    /*
+     * OutputStream configuration
+     */
+    bool addDatanode;
+    int32_t chunkSize;
+    int32_t packetSize;
+    int32_t blockWriteRetry; //retry on block not replicated yet.
+    int32_t outputConnTimeout;
+    int32_t outputReadTimeout;
+    int32_t outputWriteTimeout;
+    int32_t packetPoolSize;
+    int32_t heartBeatInterval;
+    int32_t closeFileTimeout;
+
+};
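+
+// Usage sketch (illustrative; Config comes from XmlConfig.h, and the
+// constructor throws HdfsConfigInvalid when a value fails validation):
+//
+//   Config conf;
+//   SessionConfig session(conf);
+//   int32_t connectTimeout = session.getRpcConnectTimeout(); // milliseconds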
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_SESSIONCONFIG_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h
new file mode 100644
index 0000000..8e0a40e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_SHARED_PTR_H_
+#define _HDFS_LIBHDFS3_COMMON_SHARED_PTR_H_
+
+#include <tr1/memory>
+
+namespace hdfs {
+namespace internal {
+
+using std::tr1::shared_ptr;
+
+}
+}
+
+#endif
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.cc
new file mode 100644
index 0000000..1e4c9c5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.cc
@@ -0,0 +1,670 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "StackPrinter.h"
+
+#include <cassert>
+#include <cxxabi.h>
+#include <dlfcn.h>
+#include <execinfo.h>
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace hdfs {
+namespace internal {
+
+static void ATTRIBUTE_NOINLINE GetStack(int skip, int maxDepth,
+                                        std::vector<void *> & stack) {
+    ++skip; // skip the current frame.
+    stack.resize(maxDepth + skip);
+    int size = backtrace(&stack[0], maxDepth + skip);
+    size -= skip;
+
+    if (size < 0) {
+        stack.resize(0);
+        return;
+    }
+
+    stack.erase(stack.begin(), stack.begin() + skip);
+    stack.resize(size);
+}
+
+std::string DemangleSymbol(const char * symbol) {
+    int status;
+    std::string retval;
+    char * name = abi::__cxa_demangle(symbol, 0, 0, &status);
+
+    switch (status) {
+    case 0:
+        retval = name;
+        break;
+
+    case -1:
+        throw std::bad_alloc();
+        break;
+
+    case -2:
+        retval = symbol;
+        break;
+
+    case -3:
+        retval = symbol;
+        break;
+    }
+
+    if (name) {
+        free(name);
+    }
+
+    return retval;
+}
+
+#if defined(__ELF__)
+
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <link.h>  // For ElfW() macro.
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+// Re-runs fn until it doesn't cause EINTR.
+#define NO_INTR(fn)   do {} while ((fn) < 0 && errno == EINTR)
+
+// Read up to "count" bytes from file descriptor "fd" into the buffer
+// starting at "buf" while handling short reads and EINTR.  On
+// success, return the number of bytes read.  Otherwise, return -1.
+static ssize_t ReadPersistent(const int fd, void * buf, const size_t count) {
+    assert(fd >= 0);
+    char * buf0 = reinterpret_cast<char *>(buf);
+    ssize_t num_bytes = 0;
+
+    while (num_bytes < static_cast<ssize_t>(count)) {
+        ssize_t len;
+        NO_INTR(len = read(fd, buf0 + num_bytes, count - num_bytes));
+
+        if (len < 0) {  // There was an error other than EINTR.
+            return -1;
+        }
+
+        if (len == 0) {  // Reached EOF.
+            break;
+        }
+
+        num_bytes += len;
+    }
+
+    return num_bytes;
+}
+
+// Read up to "count" bytes from "offset" in the file pointed by file
+// descriptor "fd" into the buffer starting at "buf".  On success,
+// return the number of bytes read.  Otherwise, return -1.
+static ssize_t ReadFromOffset(const int fd, void * buf,
+                              const size_t count, const off_t offset) {
+    off_t off = lseek(fd, offset, SEEK_SET);
+
+    if (off == (off_t)-1) {
+        return -1;
+    }
+
+    return ReadPersistent(fd, buf, count);
+}
+
+// Try reading exactly "count" bytes from "offset" bytes in a file
+// pointed by "fd" into the buffer starting at "buf" while handling
+// short reads and EINTR.  On success, return true. Otherwise, return
+// false.
+static bool ReadFromOffsetExact(const int fd, void * buf,
+                                const size_t count, const off_t offset) {
+    ssize_t len = ReadFromOffset(fd, buf, count, offset);
+    return len == static_cast<ssize_t>(count);
+}
+
+// Returns elf_header.e_type if the file pointed by fd is an ELF binary.
+static int FileGetElfType(const int fd) {
+    ElfW(Ehdr) elf_header;
+
+    if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+        return -1;
+    }
+
+    if (memcmp(elf_header.e_ident, ELFMAG, SELFMAG) != 0) {
+        return -1;
+    }
+
+    return elf_header.e_type;
+}
+
+// Read the section headers in the given ELF binary, and if a section
+// of the specified type is found, set the output to this section header
+// and return true.  Otherwise, return false.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static bool
+GetSectionHeaderByType(const int fd, ElfW(Half) sh_num, const off_t sh_offset,
+                       ElfW(Word) type, ElfW(Shdr) *out) {
+    // Read at most 16 section headers at a time to save read calls.
+    ElfW(Shdr) buf[16];
+
+    for (int i = 0; i < sh_num;) {
+        const ssize_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
+        const ssize_t num_bytes_to_read =
+            (sizeof(buf) > static_cast<size_t>(num_bytes_left)) ? num_bytes_left : sizeof(buf);
+        const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read,
+                                           sh_offset + i * sizeof(buf[0]));
+        assert(len % sizeof(buf[0]) == 0);
+        const ssize_t num_headers_in_buf = len / sizeof(buf[0]);
+
+        for (int j = 0; j < num_headers_in_buf; ++j) {
+            if (buf[j].sh_type == type) {
+                *out = buf[j];
+                return true;
+            }
+        }
+
+        i += num_headers_in_buf;
+    }
+
+    return false;
+}
+
+// There is no particular reason to limit section name to 63 characters,
+// but there has (as yet) been no need for anything longer either.
+const int kMaxSectionNameLen = 64;
+
+// name_len should include terminating '\0'.
+bool GetSectionHeaderByName(int fd, const char * name, size_t name_len,
+                            ElfW(Shdr) *out) {
+    ElfW(Ehdr) elf_header;
+
+    if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+        return false;
+    }
+
+    ElfW(Shdr) shstrtab;
+    off_t shstrtab_offset = (elf_header.e_shoff +
+                             elf_header.e_shentsize * elf_header.e_shstrndx);
+
+    if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
+        return false;
+    }
+
+    for (int i = 0; i < elf_header.e_shnum; ++i) {
+        off_t section_header_offset = (elf_header.e_shoff +
+                                       elf_header.e_shentsize * i);
+
+        if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
+            return false;
+        }
+
+        char header_name[kMaxSectionNameLen];
+
+        if (sizeof(header_name) < name_len) {
+            // No point in even trying.
+            return false;
+        }
+
+        off_t name_offset = shstrtab.sh_offset + out->sh_name;
+        ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
+
+        if (n_read == -1) {
+            return false;
+        } else if (n_read != static_cast<ssize_t>(name_len)) {
+            // Short read -- name could be at end of file.
+            continue;
+        }
+
+        if (memcmp(header_name, name, name_len) == 0) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+// Read a symbol table and look for the symbol containing the
+// pc. Iterate over symbols in a symbol table and look for the symbol
+// containing "pc".  On success, return true and write the symbol name
+// to out.  Otherwise, return false.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static bool
+FindSymbol(uint64_t pc, const int fd, char * out, int out_size,
+           uint64_t symbol_offset, const ElfW(Shdr) *strtab,
+           const ElfW(Shdr) *symtab) {
+    if (symtab == NULL) {
+        return false;
+    }
+
+    const int num_symbols = symtab->sh_size / symtab->sh_entsize;
+
+    for (int i = 0; i < num_symbols;) {
+        off_t offset = symtab->sh_offset + i * symtab->sh_entsize;
+        // If we are reading Elf64_Sym's, we want to limit this array to
+        // 32 elements (to keep stack consumption low), otherwise we can
+        // have a 64 element Elf32_Sym array.
+#if __WORDSIZE == 64
+#define NUM_SYMBOLS 32
+#else
+#define NUM_SYMBOLS 64
+#endif
+        // Read at most NUM_SYMBOLS symbols at once to save read() calls.
+        ElfW(Sym) buf[NUM_SYMBOLS];
+        const ssize_t len = ReadFromOffset(fd, &buf, sizeof(buf), offset);
+        assert(len % sizeof(buf[0]) == 0);
+        const ssize_t num_symbols_in_buf = len / sizeof(buf[0]);
+
+        for (int j = 0; j < num_symbols_in_buf; ++j) {
+            const ElfW(Sym)& symbol = buf[j];
+            uint64_t start_address = symbol.st_value;
+            start_address += symbol_offset;
+            uint64_t end_address = start_address + symbol.st_size;
+
+            if (symbol.st_value != 0 &&  // Skip null value symbols.
+                    symbol.st_shndx != 0 && // Skip undefined symbols.
+                    start_address <= pc && pc < end_address) {
+                ssize_t len1 = ReadFromOffset(fd, out, out_size,
+                                              strtab->sh_offset + symbol.st_name);
+
+                if (len1 <= 0 || memchr(out, '\0', out_size) == NULL) {
+                    return false;
+                }
+
+                return true;  // Obtained the symbol name.
+            }
+        }
+
+        i += num_symbols_in_buf;
+    }
+
+    return false;
+}
+
+// Get the symbol name of "pc" from the file pointed by "fd".  Process
+// both regular and dynamic symbol tables if necessary.  On success,
+// write the symbol name to "out" and return true.  Otherwise, return
+// false.
+static bool GetSymbolFromObjectFile(const int fd, uint64_t pc,
+                                    char * out, int out_size,
+                                    uint64_t map_start_address) {
+    // Read the ELF header.
+    ElfW(Ehdr) elf_header;
+
+    if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+        return false;
+    }
+
+    uint64_t symbol_offset = 0;
+
+    if (elf_header.e_type == ET_DYN) {  // DSO needs offset adjustment.
+        symbol_offset = map_start_address;
+    }
+
+    ElfW(Shdr) symtab, strtab;
+
+    // Consult a regular symbol table first.
+    if (!GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
+                                SHT_SYMTAB, &symtab)) {
+        return false;
+    }
+
+    if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
+                             symtab.sh_link * sizeof(symtab))) {
+        return false;
+    }
+
+    if (FindSymbol(pc, fd, out, out_size, symbol_offset,
+                   &strtab, &symtab)) {
+        return true;  // Found the symbol in a regular symbol table.
+    }
+
+    // If the symbol is not found, then consult a dynamic symbol table.
+    if (!GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
+                                SHT_DYNSYM, &symtab)) {
+        return false;
+    }
+
+    if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
+                             symtab.sh_link * sizeof(symtab))) {
+        return false;
+    }
+
+    if (FindSymbol(pc, fd, out, out_size, symbol_offset,
+                   &strtab, &symtab)) {
+        return true;  // Found the symbol in a dynamic symbol table.
+    }
+
+    return false;
+}
+
+namespace {
+// Thin wrapper around a file descriptor so that the file descriptor
+// gets closed for sure.
+struct FileDescriptor {
+    const int fd_;
+    explicit FileDescriptor(int fd) : fd_(fd) {}
+    ~FileDescriptor() {
+        if (fd_ >= 0) {
+            NO_INTR(close(fd_));
+        }
+    }
+    int get() {
+        return fd_;
+    }
+
+private:
+    explicit FileDescriptor(const FileDescriptor &);
+    void operator=(const FileDescriptor &);
+};
+
+// Helper class for reading lines from file.
+//
+// Note: we don't use ProcMapsIterator since the object is big (it has
+// a 5k array member) and uses async-unsafe functions such as sscanf()
+// and snprintf().
+class LineReader {
+public:
+    explicit LineReader(int fd, char * buf, int buf_len) : fd_(fd),
+        buf_(buf), buf_len_(buf_len), bol_(buf), eol_(buf), eod_(buf) {
+    }
+
+    // Read '\n'-terminated line from file.  On success, modify "bol"
+    // and "eol", then return true.  Otherwise, return false.
+    //
+    // Note: if the last line doesn't end with '\n', the line will be
+    // dropped.  It's an intentional behavior to make the code simple.
+    bool ReadLine(const char ** bol, const char ** eol) {
+        if (BufferIsEmpty()) {  // First time.
+            const ssize_t num_bytes = ReadPersistent(fd_, buf_, buf_len_);
+
+            if (num_bytes <= 0) {  // EOF or error.
+                return false;
+            }
+
+            eod_ = buf_ + num_bytes;
+            bol_ = buf_;
+        } else {
+            bol_ = eol_ + 1;  // Advance to the next line in the buffer.
+            assert(bol_ <= eod_); // "bol_" can point to "eod_".
+
+            if (!HasCompleteLine()) {
+                const int incomplete_line_length = eod_ - bol_;
+                // Move the trailing incomplete line to the beginning.
+                memmove(buf_, bol_, incomplete_line_length);
+                // Read text from file and append it.
+                char * const append_pos = buf_ + incomplete_line_length;
+                const int capacity_left = buf_len_ - incomplete_line_length;
+                const ssize_t num_bytes = ReadPersistent(fd_, append_pos,
+                                          capacity_left);
+
+                if (num_bytes <= 0) {  // EOF or error.
+                    return false;
+                }
+
+                eod_ = append_pos + num_bytes;
+                bol_ = buf_;
+            }
+        }
+
+        eol_ = FindLineFeed();
+
+        if (eol_ == NULL) {  // '\n' not found.  Malformed line.
+            return false;
+        }
+
+        *eol_ = '\0';  // Replace '\n' with '\0'.
+        *bol = bol_;
+        *eol = eol_;
+        return true;
+    }
+
+    // Beginning of line.
+    const char * bol() {
+        return bol_;
+    }
+
+    // End of line.
+    const char * eol() {
+        return eol_;
+    }
+
+private:
+    explicit LineReader(const LineReader &);
+    void operator=(const LineReader &);
+
+    char * FindLineFeed() {
+        return reinterpret_cast<char *>(memchr(bol_, '\n', eod_ - bol_));
+    }
+
+    bool BufferIsEmpty() {
+        return buf_ == eod_;
+    }
+
+    bool HasCompleteLine() {
+        return !BufferIsEmpty() && FindLineFeed() != NULL;
+    }
+
+    const int fd_;
+    char * const buf_;
+    const int buf_len_;
+    char * bol_;
+    char * eol_;
+    const char * eod_; // End of data in "buf_".
+};
+}  // namespace
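+
+// Illustrative usage of LineReader (a sketch, not part of the original
+// patch): read /proc/self/maps line by line with a fixed-size buffer and
+// without async-unsafe calls.
+//
+//   int fd;
+//   NO_INTR(fd = open("/proc/self/maps", O_RDONLY));
+//   char buf[1024];
+//   LineReader reader(fd, buf, sizeof(buf));
+//   const char *bol, *eol;
+//   while (reader.ReadLine(&bol, &eol)) {
+//       // [bol, eol) is one line; ReadLine() replaced its '\n' with '\0'.
+//   }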
+
+// Place the hex number read from "start" into "*hex".  The pointer to
+// the first non-hex character or "end" is returned.
+static char * GetHex(const char * start, const char * end, uint64_t * hex) {
+    *hex = 0;
+    const char * p;
+
+    for (p = start; p < end; ++p) {
+        int ch = *p;
+
+        if ((ch >= '0' && ch <= '9') ||
+                (ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f')) {
+            *hex = (*hex << 4) | (ch < 'A' ? ch - '0' : (ch & 0xF) + 9);
+        } else {  // Encountered the first non-hex character.
+            break;
+        }
+    }
+
+    assert(p <= end);
+    return const_cast<char *>(p);
+}
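+// For example (illustrative): given the /proc/self/maps line
+// "08048000-0804c000 r-xp ...", GetHex(line, line_end, &hex) stores
+// 0x08048000 in *hex and returns a pointer to the '-', the first
+// non-hex character.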
+
+// Search for the object file (from /proc/self/maps) that contains
+// the specified pc. If found, open this file and return the file handle,
+// and also set start_address to the start address of where this object
+// file is mapped to in memory. Otherwise, return -1.
+static int
+OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
+        uint64_t & start_address) {
+    int object_fd;
+    // Open /proc/self/maps.
+    int maps_fd;
+    NO_INTR(maps_fd = open("/proc/self/maps", O_RDONLY));
+    FileDescriptor wrapped_maps_fd(maps_fd);
+
+    if (wrapped_maps_fd.get() < 0) {
+        return -1;
+    }
+
+    // Iterate over maps and look for the map containing the pc.  Then
+    // look into the symbol tables inside.
+    char buf[1024];  // Big enough for a line of a sane /proc/self/maps.
+    LineReader reader(wrapped_maps_fd.get(), buf, sizeof(buf));
+
+    while (true) {
+        const char * cursor;
+        const char * eol;
+
+        if (!reader.ReadLine(&cursor, &eol)) {  // EOF or malformed line.
+            return -1;
+        }
+
+        // Start parsing line in /proc/self/maps.  Here is an example:
+        //
+        // 08048000-0804c000 r-xp 00000000 08:01 2142121    /bin/cat
+        //
+        // We want start address (08048000), end address (0804c000), flags
+        // (r-xp) and file name (/bin/cat).
+        // Read start address.
+        cursor = GetHex(cursor, eol, &start_address);
+
+        if (cursor == eol || *cursor != '-') {
+            return -1;  // Malformed line.
+        }
+
+        ++cursor;  // Skip '-'.
+        // Read end address.
+        uint64_t end_address;
+        cursor = GetHex(cursor, eol, &end_address);
+
+        if (cursor == eol || *cursor != ' ') {
+            return -1;  // Malformed line.
+        }
+
+        ++cursor;  // Skip ' '.
+
+        // Check start and end addresses.
+        if (!(start_address <= pc && pc < end_address)) {
+            continue;  // We skip this map.  PC isn't in this map.
+        }
+
+        // Read flags.  Skip flags until we encounter a space or eol.
+        const char * const flags_start = cursor;
+
+        while (cursor < eol && *cursor != ' ') {
+            ++cursor;
+        }
+
+        // We expect at least four letters for flags (ex. "r-xp").
+        if (cursor == eol || cursor < flags_start + 4) {
+            return -1;  // Malformed line.
+        }
+
+        // Check flags.  We are only interested in "r-x" maps.
+        if (memcmp(flags_start, "r-x", 3) != 0) {  // Not a "r-x" map.
+            continue;  // We skip this map.
+        }
+
+        ++cursor;  // Skip ' '.
+        // Skip to file name.  "cursor" now points to file offset.  We need to
+        // skip at least three spaces for file offset, dev, and inode.
+        int num_spaces = 0;
+
+        while (cursor < eol) {
+            if (*cursor == ' ') {
+                ++num_spaces;
+            } else if (num_spaces >= 3) {
+                // The first non-space character after skipping three spaces
+                // is the beginning of the file name.
+                break;
+            }
+
+            ++cursor;
+        }
+
+        if (cursor == eol) {
+            return -1;  // Malformed line.
+        }
+
+        // Finally, "cursor" now points to file name of our interest.
+        NO_INTR(object_fd = open(cursor, O_RDONLY));
+
+        if (object_fd < 0) {
+            return -1;
+        }
+
+        return object_fd;
+    }
+}
+
+static const std::string SymbolizeAndDemangle(void * pc) {
+    std::vector<char> buffer(1024);
+    std::ostringstream ss;
+    uint64_t pc0 = reinterpret_cast<uintptr_t>(pc);
+    uint64_t start_address = 0;
+    int object_fd = OpenObjectFileContainingPcAndGetStartAddress(pc0,
+                    start_address);
+
+    if (object_fd == -1) {
+        return DEFAULT_STACK_PREFIX"Unknown";
+    }
+
+    FileDescriptor wrapped_object_fd(object_fd);
+    int elf_type = FileGetElfType(wrapped_object_fd.get());
+
+    if (elf_type == -1) {
+        return DEFAULT_STACK_PREFIX"Unknown";
+    }
+
+    if (!GetSymbolFromObjectFile(wrapped_object_fd.get(), pc0,
+                                 &buffer[0], buffer.size(), start_address)) {
+        return DEFAULT_STACK_PREFIX"Unknown";
+    }
+
+    ss << DEFAULT_STACK_PREFIX << DemangleSymbol(&buffer[0]);
+    return ss.str();
+}
+
+#elif defined(OS_MACOSX) && defined(HAVE_DLADDR)
+
+static const std::string SymbolizeAndDemangle(void * pc) {
+    Dl_info info;
+    std::ostringstream ss;
+
+    if (dladdr(pc, &info) && info.dli_sname) {
+        ss << DEFAULT_STACK_PREFIX << DemangleSymbol(info.dli_sname);
+    } else {
+        ss << DEFAULT_STACK_PREFIX << "Unknown";
+    }
+
+    return ss.str();
+}
+
+#endif
+
+const std::string PrintStack(int skip, int maxDepth) {
+    std::ostringstream ss;
+    std::vector<void *> stack;
+    GetStack(skip + 1, maxDepth, stack);
+
+    for (size_t i = 0; i < stack.size(); ++i) {
+        ss << SymbolizeAndDemangle(stack[i]) << std::endl;
+    }
+
+    return ss.str();
+}
+
+}
+}
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.h
new file mode 100644
index 0000000..4dff889
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.h
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_STACK_PRINTER_H_
+#define _HDFS_LIBHDFS3_COMMON_STACK_PRINTER_H_
+
+#include "platform.h"
+
+#include <string>
+
+#ifndef DEFAULT_STACK_PREFIX
+#define DEFAULT_STACK_PREFIX "\t@\t"
+#endif
+
+namespace hdfs {
+namespace internal {
+
+extern const std::string PrintStack(int skip, int maxDepth);
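+
+// Illustrative call (a sketch, not part of the original patch): skip the
+// caller's own frame and symbolize at most 64 frames.
+//
+//   std::string trace = hdfs::internal::PrintStack(1, 64);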
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_STACK_PRINTER_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StringUtil.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StringUtil.h
new file mode 100644
index 0000000..33dabd9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StringUtil.h
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_STRINGUTIL_H_
+#define _HDFS_LIBHDFS3_COMMON_STRINGUTIL_H_
+
+#include <string.h>
+#include <string>
+#include <vector>
+#include <cctype>
+
+namespace hdfs {
+namespace internal {
+
+static inline std::vector<std::string> StringSplit(const std::string &str,
+        const char *sep) {
+    char *token, *lasts = NULL;
+    std::string s = str;
+    std::vector<std::string> retval;
+    token = strtok_r(&s[0], sep, &lasts);
+
+    while (token) {
+        retval.push_back(token);
+        token = strtok_r(NULL, sep, &lasts);
+    }
+
+    return retval;
+}
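+// For example (illustrative): StringSplit("a,b,,c", ",") returns
+// {"a", "b", "c"}; strtok_r() collapses consecutive separators, so no
+// empty tokens are produced.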
+
+static inline std::string StringTrim(const std::string &str) {
+    int start = 0, end = static_cast<int>(str.length());
+
+    for (; start < static_cast<int>(str.length()); ++start) {
+        if (!std::isspace(static_cast<unsigned char>(str[start]))) {
+            break;
+        }
+    }
+
+    for (; end > 0; --end) {
+        if (!std::isspace(static_cast<unsigned char>(str[end - 1]))) {
+            break;
+        }
+    }
+
+    return str.substr(start, end - start);
+}
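+// For example (illustrative): StringTrim("  foo bar\t\n") returns
+// "foo bar"; only leading and trailing whitespace is removed.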
+
+}
+}
+#endif /* _HDFS_LIBHDFS3_COMMON_STRINGUTIL_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.cc
new file mode 100644
index 0000000..810efc0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.cc
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Thread.h"
+
+#include <pthread.h>
+#include <signal.h>
+#include <unistd.h>
+
+namespace hdfs {
+namespace internal {
+
+sigset_t ThreadBlockSignal() {
+    sigset_t sigs;
+    sigset_t oldMask;
+    sigemptyset(&sigs);
+    sigaddset(&sigs, SIGHUP);
+    sigaddset(&sigs, SIGINT);
+    sigaddset(&sigs, SIGTERM);
+    sigaddset(&sigs, SIGUSR1);
+    sigaddset(&sigs, SIGUSR2);
+    sigaddset(&sigs, SIGPIPE);
+    pthread_sigmask(SIG_BLOCK, &sigs, &oldMask);
+    return oldMask;
+}
+
+void ThreadUnBlockSignal(sigset_t sigs) {
+    pthread_sigmask(SIG_SETMASK, &sigs, 0);
+}
+
+}
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.h
new file mode 100644
index 0000000..6db14bb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.h
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_THREAD_H_
+#define _HDFS_LIBHDFS3_COMMON_THREAD_H_
+
+#include "platform.h"
+
+#include <signal.h>
+
+#ifdef NEED_BOOST
+
+#include <boost/thread.hpp>
+
+namespace hdfs {
+namespace internal {
+
+using boost::thread;
+using boost::mutex;
+using boost::lock_guard;
+using boost::unique_lock;
+using boost::condition_variable;
+using boost::defer_lock_t;
+using boost::once_flag;
+using boost::call_once;
+using namespace boost::this_thread;
+
+}
+}
+
+#else
+
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+
+namespace hdfs {
+namespace internal {
+
+using std::thread;
+using std::mutex;
+using std::lock_guard;
+using std::unique_lock;
+using std::condition_variable;
+using std::defer_lock_t;
+using std::once_flag;
+using std::call_once;
+using namespace std::this_thread;
+
+}
+}
+#endif
+
+namespace hdfs {
+namespace internal {
+
+/*
+ * Block SIGHUP, SIGINT, SIGTERM, SIGUSR1, SIGUSR2 and SIGPIPE in the
+ * calling thread before a background thread is created, so that the new
+ * thread inherits the mask and these signals are still delivered to the
+ * main thread. Returns the previous signal mask.
+ */
+sigset_t ThreadBlockSignal();
+
+/*
+ * Restore the previous signal mask returned by ThreadBlockSignal().
+ */
+void ThreadUnBlockSignal(sigset_t sigs);
+
+}
+}
+
+#define CREATE_THREAD(retval, fun) \
+    do { \
+        sigset_t sigs = hdfs::internal::ThreadBlockSignal(); \
+        try { \
+            retval = hdfs::internal::thread(fun); \
+            hdfs::internal::ThreadUnBlockSignal(sigs); \
+        } catch (...) { \
+            hdfs::internal::ThreadUnBlockSignal(sigs); \
+            throw; \
+        } \
+    } while(0)
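+
+// Illustrative usage (a sketch, not part of the original patch; "Worker"
+// stands for any callable):
+//
+//   hdfs::internal::thread t;
+//   CREATE_THREAD(t, Worker());
+//   t.join();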
+
+#endif /* _HDFS_LIBHDFS3_COMMON_THREAD_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/UnorderedMap.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/UnorderedMap.h
new file mode 100644
index 0000000..3bb08af
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/UnorderedMap.h
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_UNORDERED_MAP_H_
+#define _HDFS_LIBHDFS3_COMMON_UNORDERED_MAP_H_
+
+#include "platform.h"
+
+#ifdef NEED_BOOST
+#include <boost/unordered_map.hpp>
+#else
+#include <unordered_map>
+#endif
+
+namespace hdfs {
+namespace internal {
+
+#ifdef NEED_BOOST
+using boost::unordered_map;
+#else
+using std::unordered_map;
+#endif
+
+}
+}
+
+#endif
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.cc
new file mode 100644
index 0000000..b26c993
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.cc
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WritableUtils.h"
+
+#include <arpa/inet.h>
+#include <cstring>
+#include <limits>
+#include <stdexcept>
+#include <string>
+
+namespace hdfs {
+namespace internal {
+
+WritableUtils::WritableUtils(char *b, size_t l) :
+    buffer(b), len(l), current(0) {
+}
+
+int32_t WritableUtils::ReadInt32() {
+    int64_t val;
+    val = ReadInt64();
+
+    if (val < std::numeric_limits<int32_t>::min()
+            || val > std::numeric_limits<int32_t>::max()) {
+        throw std::range_error("overflow");
+    }
+
+    return val;
+}
+
+int64_t WritableUtils::ReadInt64() {
+    int64_t value;
+    int firstByte = readByte();
+    int len = decodeWritableUtilsSize(firstByte);
+
+    if (len == 1) {
+        value = firstByte;
+        return value;
+    }
+
+    int64_t i = 0;
+
+    for (int idx = 0; idx < len - 1; idx++) {
+        unsigned char b = readByte();
+        i = i << 8;
+        i = i | (b & 0xFF);
+    }
+
+    value = (isNegativeWritableUtils(firstByte) ? (i ^ -1L) : i);
+    return value;
+}
+
+void WritableUtils::ReadRaw(char *buf, size_t size) {
+    if (size > len - current) {
+        throw std::range_error("overflow");
+    }
+
+    memcpy(buf, buffer + current, size);
+    current += size;
+}
+
+std::string WritableUtils::ReadText() {
+    int32_t length;
+    std::string retval;
+    length = ReadInt32();
+    retval.resize(length);
+    ReadRaw(&retval[0], length);
+    return retval;
+}
+
+size_t WritableUtils::WriteInt32(int32_t value) {
+    return WriteInt64(value);
+}
+
+size_t WritableUtils::WriteInt64(int64_t value) {
+    size_t retval = 1;
+
+    if (value >= -112 && value <= 127) {
+        writeByte((int) value);
+        return retval;
+    }
+
+    int len = -112;
+
+    if (value < 0) {
+        value ^= -1L;  // take one's complement
+        len = -120;
+    }
+
+    int64_t tmp = value;
+
+    while (tmp != 0) {
+        tmp = tmp >> 8;
+        len--;
+    }
+
+    writeByte((int) len);
+    len = (len < -120) ? -(len + 120) : -(len + 112);
+
+    for (int idx = len; idx != 0; idx--) {
+        int shiftbits = (idx - 1) * 8;
+        int64_t mask = 0xFFLL << shiftbits;
+        ++retval;
+        writeByte((int)((value & mask) >> shiftbits));
+    }
+
+    return retval;
+}
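+// Worked example (illustrative): WriteInt64(300) cannot use the
+// single-byte form (300 > 127), so it emits a length byte first.  300
+// needs two data bytes, so the length byte is -114 (0x8E), followed by
+// 0x01 0x2C (0x012C == 300): three bytes in total, matching Java's
+// WritableUtils.writeVLong().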
+
+size_t WritableUtils::WriteRaw(const char *buf, size_t size) {
+    if (size > len - current) {
+        throw std::range_error("overflow");
+    }
+
+    memcpy(buffer + current, buf, size);
+    current += size;
+    return size;
+}
+
+int WritableUtils::decodeWritableUtilsSize(int value) {
+    if (value >= -112) {
+        return 1;
+    } else if (value < -120) {
+        return -119 - value;
+    }
+
+    return -111 - value;
+}
+
+int WritableUtils::readByte() {
+    if (sizeof(char) > len - current) {
+        throw std::range_error("overflow");
+    }
+
+    // Cast through signed char so the result is sign extended even on
+    // platforms where plain char is unsigned; the varint decoder relies
+    // on negative length bytes.
+    return static_cast<signed char>(buffer[current++]);
+}
+
+void WritableUtils::writeByte(int val) {
+    if (sizeof(char) > len - current) {
+        throw std::range_error("overflow");
+    }
+
+    buffer[current++] = val;
+}
+
+size_t WritableUtils::WriteText(const std::string & str) {
+    size_t retval = 0;
+    int32_t length = str.length();
+    retval += WriteInt32(length);
+    retval += WriteRaw(&str[0], length);
+    return retval;
+}
+
+bool WritableUtils::isNegativeWritableUtils(int value) {
+    return value < -120 || (value >= -112 && value < 0);
+}
+
+int32_t WritableUtils::ReadBigEndian32() {
+    char buf[sizeof(int32_t)];
+
+    for (size_t i = 0; i < sizeof(int32_t); ++i) {
+        buf[i] = readByte();
+    }
+
+    return ntohl(*(uint32_t *) buf);
+}
+
+}
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.h
new file mode 100644
index 0000000..7a16882
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.h
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS_3_UTIL_WRITABLEUTILS_H_
+#define _HDFS_LIBHDFS_3_UTIL_WRITABLEUTILS_H_
+
+#include <cstddef>
+#include <stdint.h>
+#include <string>
+
+namespace hdfs {
+namespace internal {
+
+class WritableUtils {
+public:
+    WritableUtils(char *b, size_t l);
+
+    int32_t ReadInt32();
+
+    int64_t ReadInt64();
+
+    void ReadRaw(char *buf, size_t size);
+
+    std::string ReadText();
+
+    int readByte();
+
+    size_t WriteInt32(int32_t value);
+
+    size_t WriteInt64(int64_t value);
+
+    size_t WriteRaw(const char *buf, size_t size);
+
+    size_t WriteText(const std::string &str);
+
+private:
+    int decodeWritableUtilsSize(int value);
+
+    void writeByte(int val);
+
+    bool isNegativeWritableUtils(int value);
+
+    int32_t ReadBigEndian32();
+
+private:
+    char *buffer;
+    size_t len;
+    size_t current;
+};
+
+}
+}
+#endif /* _HDFS_LIBHDFS_3_UTIL_WRITABLEUTILS_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.cc
new file mode 100644
index 0000000..364eb04
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.cc
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WriteBuffer.h"
+
+#include <google/protobuf/io/coded_stream.h>
+
+using google::protobuf::io::CodedOutputStream;
+using google::protobuf::uint8;
+
+#define WRITEBUFFER_INIT_SIZE 64
+
+namespace hdfs {
+namespace internal {
+
+WriteBuffer::WriteBuffer() :
+    size(0), buffer(WRITEBUFFER_INIT_SIZE) {
+}
+
+WriteBuffer::~WriteBuffer() {
+}
+
+void WriteBuffer::writeVarint32(int32_t value, size_t pos) {
+    char buf[5];  // A varint32 needs at most 5 bytes; avoid shadowing "buffer".
+    uint8 *end = CodedOutputStream::WriteVarint32ToArray(value,
+                  reinterpret_cast<uint8*>(buf));
+    write(buf, reinterpret_cast<char*>(end) - buf, pos);
+}
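+// For example (illustrative): writeVarint32(300) emits the protobuf
+// varint bytes 0xAC 0x02 -- the low 7 bits (0x2C) with the continuation
+// bit set, then the remaining bits (0x02).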
+
+char *WriteBuffer::alloc(size_t offset, size_t s) {
+    assert(offset <= size && size <= buffer.size());
+
+    if (offset > size) {
+        return NULL;
+    }
+
+    size_t target = offset + s;
+
+    if (target >= buffer.size()) {
+        target = target > 2 * buffer.size() ? target : 2 * buffer.size();
+        buffer.resize(target);
+    }
+
+    size = offset + s;
+    return &buffer[offset];
+}
+
+void WriteBuffer::write(const void *bytes, size_t s, size_t pos) {
+    assert(NULL != bytes);
+    assert(pos <= size && pos < buffer.size());
+    // Honor the documented contract: the data after "pos" is overwritten.
+    char *p = alloc(pos, s);
+    memcpy(p, bytes, s);
+}
+
+}
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.h
new file mode 100644
index 0000000..0935c3f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.h
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_WRITEBUFFER_H_
+#define _HDFS_LIBHDFS3_COMMON_WRITEBUFFER_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <stdint.h>
+#include <vector>
+
+#include <arpa/inet.h>
+
+namespace hdfs {
+namespace internal {
+
+/**
+ * a data buffer used to read and write.
+ */
+class WriteBuffer {
+public:
+    /**
+     * Construct an empty buffer.
+     * @throw nothrow
+     */
+    WriteBuffer();
+
+    /**
+     * Destroy a buffer.
+     * @throw nothrow
+     */
+    ~WriteBuffer();
+
+    /**
+     * Write a string into the buffer.
+     * The terminating '\0' is also written into the buffer.
+     * @param str The string to be written.
+     * @throw nothrow
+     */
+    void writeString(const char *str) {
+        writeString(str, size);
+    }
+
+    /**
+     * Write a string into the buffer at the given position.
+     * The terminating '\0' is also written, and the data after the given
+     * position will be overwritten.
+     * @param str The string to be written.
+     * @param pos The start position in the buffer.
+     * @throw nothrow
+     */
+    void writeString(const char *str, size_t pos) {
+        write(str, strlen(str) + 1, pos);
+    }
+
+    /**
+     * Write a byte sequence into the buffer.
+     * @param bytes The data to be written.
+     * @param s The size of the data.
+     */
+    void write(const void *bytes, size_t s) {
+        write(bytes, s, size);
+    }
+
+    /**
+     * Write a byte sequence into the buffer at the given position.
+     * The data after the given position will be overwritten.
+     * @param bytes The data to be written.
+     * @param s The size of the data.
+     * @param pos The start position in the buffer.
+     */
+    void write(const void *bytes, size_t s, size_t pos);
+
+    /**
+     * Write char into buffer.
+     * @param value The char to be written.
+     * @throw nothrow
+     */
+    void write(char value) {
+        write(value, size);
+    }
+
+    /**
+     * Write a char into the buffer at the given position.
+     * The data after the given position will be overwritten.
+     * @param value The char to be written.
+     * @param pos The start position in the buffer.
+     * @throw nothrow
+     */
+    void write(char value, size_t pos) {
+        write(&value, sizeof(value), pos);
+    }
+
+    /**
+     * Convert the 16 bit integer into big endian and write into buffer.
+     * @param value The integer to be written.
+     * @throw nothrow
+     */
+    void writeBigEndian(int16_t value) {
+        writeBigEndian(value, size);
+    }
+
+    /**
+     * Convert a 16 bit integer to big endian and write it into the buffer
+     * at the given position. The data after the given position will be
+     * overwritten.
+     * @param value The integer to be written.
+     * @param pos The start position in the buffer.
+     * @throw nothrow
+     */
+    void writeBigEndian(int16_t value, size_t pos) {
+        int16_t v = htons(value);
+        write((const char *) &v, sizeof(v), pos);
+    }
+
+    /**
+     * Convert the 32 bit integer into big endian and write into buffer.
+     * @param value The integer to be written.
+     * @throw nothrow
+     */
+    void writeBigEndian(int32_t value) {
+        writeBigEndian(value, size);
+    }
+
+    /**
+     * Convert the 32 bit integer into big endian and write into buffer with given position.
+     * The data after given position will be overwritten.
+     * @param value The integer to be written.
+     * @param pos The given start position in buffer.
+     * @throw nothrow
+     */
+    void writeBigEndian(int32_t value, size_t pos) {
+        int32_t v = htonl(value);
+        write((const char *) &v, sizeof(v), pos);
+    }
+
+    /**
+     * Convert the 32 bit integer into varint and write into buffer.
+     * @param value The integer to be written.
+     * @throw nothrow
+     */
+    void writeVarint32(int32_t value) {
+        writeVarint32(value, size);
+    }
+
+    /**
+     * Convert the 32 bit integer into varint and write into buffer with given position.
+     * The data after given position will be overwritten.
+     * @param value The integer to be written.
+     * @param pos The given start position in buffer.
+     * @throw nothrow
+     */
+    void writeVarint32(int32_t value, size_t pos);
+
+    /**
+     * Get the buffered data from given offset.
+     * @param offset The size of bytes to be ignored from begin of buffer.
+     * @return The buffered data, or NULL if offset is over the end of data.
+     * @throw nothrow
+     */
+    const char *getBuffer(size_t offset) const {
+        assert(offset <= size && offset < buffer.size());
+
+        if (offset >= size) {
+            return NULL;
+        }
+
+        return &buffer[offset];
+    }
+
+    /**
+     * Get the total bytes in the buffer from offset.
+     * @param offset The size of bytes to be ignored from begin of buffer.
+     * @return The total bytes in the buffer from offset.
+     * @throw nothrow
+     */
+    size_t getDataSize(size_t offset) const {
+        assert(offset <= size);
+        return size - offset;
+    }
+
+    /**
+     * Allocate a region of the buffer for the caller, who is expected to
+     * copy data into it directly rather than through the write methods.
+     * This sets the current data size to offset + s; the caller may need
+     * to reset it to the correct value afterwards.
+     * @param offset The offset in the buffer; data after it will be overwritten.
+     * @param s The number of bytes to allocate.
+     * @return The start address in the buffer at offset, or NULL if offset
+     *         is past the end of the data.
+     * @throw nothrow
+     */
+    char *alloc(size_t offset, size_t s);
+
+    /**
+     * Allocate a region at the end of the current buffer for the caller,
+     * who is expected to copy data into it directly rather than through
+     * the write methods. This sets the current data size to size + s; the
+     * caller may need to reset it to the correct value afterwards.
+     * @param s The number of bytes to allocate.
+     * @return The start address of the allocated region.
+     * @throw nothrow
+     */
+    char *alloc(size_t s) {
+        return alloc(size, s);
+    }
+
+    /**
+     * Set the available data size.
+     * @param s The size to be set.
+     * @throw nothrow
+     */
+    void setBufferDataSize(size_t s) {
+        size = s;
+    }
+
+private:
+    size_t size;  // Current write position.
+    std::vector<char> buffer;
+
+};
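+
+// Illustrative usage (a sketch, not part of the original patch; "payload"
+// and "payloadSize" are hypothetical):
+//
+//   WriteBuffer buf;
+//   buf.writeBigEndian(static_cast<int32_t>(0));  // placeholder length
+//   buf.write(payload, payloadSize);
+//   size_t total = buf.getDataSize(0);
+//   buf.writeBigEndian(static_cast<int32_t>(payloadSize), 0);  // patch it
+//   buf.setBufferDataSize(total);  // writing at pos 0 reset the size
+//   const char *data = buf.getBuffer(0);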
+
+}
+}
+#endif /* _HDFS_LIBHDFS3_COMMON_WRITEBUFFER_H_ */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.cc
new file mode 100644
index 0000000..7de532c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.cc
@@ -0,0 +1,395 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Hash.h"
+#include "XmlConfig.h"
+
+#include <cassert>
+#include <errno.h>
+#include <fstream>
+#include <libxml/parser.h>
+#include <libxml/tree.h>
+#include <limits>
+#include <string.h>
+#include <unistd.h>
+#include <vector>
+
+using namespace hdfs::internal;
+
+using std::map;
+using std::string;
+using std::vector;
+
+namespace hdfs {
+
+typedef map<string, string>::const_iterator Iterator;
+typedef map<string, string> Map;
+
+static int32_t StrToInt32(const char *str) {
+    long retval;
+    char *end = NULL;
+    errno = 0;
+    retval = strtol(str, &end, 0);
+
+    if (EINVAL == errno || 0 != *end || end == str) {
+        THROW(HdfsBadNumFoumat, "Invalid int32_t type: %s", str);
+    }
+
+    if (ERANGE == errno || retval > std::numeric_limits<int32_t>::max()
+            || retval < std::numeric_limits<int32_t>::min()) {
+        THROW(HdfsBadNumFoumat, "Underflow/Overflow int32_t type: %s", str);
+    }
+
+    return retval;
+}
+
+static int64_t StrToInt64(const char *str) {
+    long long retval;
+    char *end = NULL;
+    errno = 0;
+    retval = strtoll(str, &end, 0);
+
+    if (EINVAL == errno || 0 != *end || end == str) {
+        THROW(HdfsBadNumFoumat, "Invalid int64_t type: %s", str);
+    }
+
+    if (ERANGE == errno || retval > std::numeric_limits<int64_t>::max()
+            || retval < std::numeric_limits<int64_t>::min()) {
+        THROW(HdfsBadNumFoumat, "Underflow/Overflow int64_t type: %s", str);
+    }
+
+    return retval;
+}
+
+static bool StrToBool(const char *str) {
+    bool retval = false;
+
+    if (!strcasecmp(str, "true") || !strcmp(str, "1")) {
+        retval = true;
+    } else if (!strcasecmp(str, "false") || !strcmp(str, "0")) {
+        retval = false;
+    } else {
+        THROW(HdfsBadBoolFoumat, "Invalid bool type: %s", str);
+    }
+
+    return retval;
+}
+
+static double StrToDouble(const char *str) {
+    double retval;
+    char *end = NULL;
+    errno = 0;
+    retval = strtod(str, &end);
+
+    if (EINVAL == errno || 0 != *end || end == str) {
+        THROW(HdfsBadNumFoumat, "Invalid double type: %s", str);
+    }
+
+    if (ERANGE == errno || retval > std::numeric_limits<double>::max()
+            || retval < -std::numeric_limits<double>::max()) {
+        THROW(HdfsBadNumFoumat, "Underflow/Overflow double type: %s", str);
+    }
+
+    return retval;
+}
+
+static void readConfigItem(xmlNodePtr root, Map & kv, const char *path) {
+    std::string key, value;
+    xmlNodePtr curNode;
+    bool hasname = false, hasvalue = false;
+
+    for (curNode = root; NULL != curNode; curNode = curNode->next) {
+        if (curNode->type != XML_ELEMENT_NODE) {
+            continue;
+        }
+
+        if (!hasname && !strcmp((const char *) curNode->name, "name")) {
+            if (NULL != curNode->children
+                    && XML_TEXT_NODE == curNode->children->type) {
+                key = (const char *) curNode->children->content;
+                hasname = true;
+            }
+        } else if (!hasvalue
+                   && !strcmp((const char *) curNode->name, "value")) {
+            if (NULL != curNode->children
+                    && XML_TEXT_NODE == curNode->children->type) {
+                value = (const char *) curNode->children->content;
+                hasvalue = true;
+            }
+        } else {
+            continue;
+        }
+    }
+
+    if (hasname && hasvalue) {
+        kv[key] = value;
+        return;
+    } else if (hasname) {
+        kv[key] = "";
+        return;
+    }
+
+    THROW(HdfsBadConfigFoumat, "Config cannot parse configure file: \"%s\"",
+          path);
+}
+
+static void readConfigItems(xmlDocPtr doc, Map & kv, const char *path) {
+    xmlNodePtr root, curNode;
+    root = xmlDocGetRootElement(doc);
+
+    if (NULL == root || strcmp((const char *) root->name, "configuration")) {
+        THROW(HdfsBadConfigFoumat, "Config cannot parse configure file: \"%s\"",
+              path);
+    }
+
+    /*
+     * for each property
+     */
+    for (curNode = root->children; NULL != curNode; curNode = curNode->next) {
+        if (curNode->type != XML_ELEMENT_NODE) {
+            continue;
+        }
+
+        if (strcmp((const char *) curNode->name, "property")) {
+            THROW(HdfsBadConfigFoumat,
+                  "Config cannot parse configure file: \"%s\"", path);
+        }
+
+        readConfigItem(curNode->children, kv, path);
+    }
+}
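+// readConfigItems() expects the standard Hadoop configuration layout
+// (illustrative):
+//
+//   <configuration>
+//     <property>
+//       <name>dfs.replication</name>
+//       <value>3</value>
+//     </property>
+//   </configuration>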
+
+Config::Config(const char *p) :
+    path(p) {
+    update(p);
+}
+
+void Config::update(const char *p) {
+    char msg[64];
+    xmlDocPtr doc; /* the resulting document tree */
+    LIBXML_TEST_VERSION
+    kv.clear();
+    path = p;
+
+    if (access(path.c_str(), R_OK)) {
+        strerror_r(errno, msg, sizeof(msg));
+        THROW(HdfsBadConfigFoumat, "Cannot read configure file: \"%s\", %s",
+              path.c_str(), msg);
+    }
+
+    /* parse the file */
+    doc = xmlReadFile(path.c_str(), NULL, 0);
+
+    try {
+        /* check if parsing succeeded */
+        if (doc == NULL) {
+            THROW(HdfsBadConfigFoumat,
+                  "Config cannot parse configure file: \"%s\"", path.c_str());
+        } else {
+            readConfigItems(doc, kv, path.c_str());
+            /* free up the resulting document */
+            xmlFreeDoc(doc);
+        }
+    } catch (...) {
+        xmlFreeDoc(doc);
+        throw;
+    }
+}
+
+const char *Config::getString(const char *key) const {
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return it->second.c_str();
+}
+
+const char *Config::getString(const char *key, const char *def) const {
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        return def;
+    } else {
+        return it->second.c_str();
+    }
+}
+
+const char *Config::getString(const std::string & key) const {
+    return getString(key.c_str());
+}
+
+const char *Config::getString(const std::string & key,
+                               const std::string & def) const {
+    return getString(key.c_str(), def.c_str());
+}
+
+int64_t Config::getInt64(const char *key) const {
+    int64_t retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    try {
+        retval = StrToInt64(it->second.c_str());
+    } catch (const HdfsBadNumFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+int64_t Config::getInt64(const char *key, int64_t def) const {
+    int64_t retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        return def;
+    }
+
+    try {
+        retval = StrToInt64(it->second.c_str());
+    } catch (const HdfsBadNumFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+int32_t Config::getInt32(const char *key) const {
+    int32_t retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    try {
+        retval = StrToInt32(it->second.c_str());
+    } catch (const HdfsBadNumFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+int32_t Config::getInt32(const char *key, int32_t def) const {
+    int32_t retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        return def;
+    }
+
+    try {
+        retval = StrToInt32(it->second.c_str());
+    } catch (const HdfsBadNumFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+double Config::getDouble(const char *key) const {
+    double retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    try {
+        retval = StrToDouble(it->second.c_str());
+    } catch (const HdfsBadNumFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+double Config::getDouble(const char *key, double def) const {
+    double retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        return def;
+    }
+
+    try {
+        retval = StrToDouble(it->second.c_str());
+    } catch (const HdfsBadNumFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+bool Config::getBool(const char *key) const {
+    bool retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    try {
+        retval = StrToBool(it->second.c_str());
+    } catch (const HdfsBadBoolFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+bool Config::getBool(const char *key, bool def) const {
+    bool retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        return def;
+    }
+
+    try {
+        retval = StrToBool(it->second.c_str());
+    } catch (const HdfsBadBoolFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+size_t Config::hash_value() const {
+    vector<size_t> values;
+    map<string, string>::const_iterator s, e;
+    e = kv.end();
+
+    for (s = kv.begin(); s != e; ++s) {
+        values.push_back(StringHasher(s->first));
+        values.push_back(StringHasher(s->second));
+    }
+
+    return CombineHasher(&values[0], values.size());
+}
+
+}
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.h
new file mode 100644
index 0000000..cb9459d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.h
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_XMLCONFIG_H_
+#define _HDFS_LIBHDFS3_COMMON_XMLCONFIG_H_
+
+#include <stdint.h>
+#include <string>
+#include <sstream>
+#include <map>
+
+namespace hdfs {
+
+/**
+ * A configuration file parser.
+ */
+class Config {
+public:
+    /**
+     * Construct an empty Config instance.
+     */
+    Config() {
+    }
+
+    /**
+     * Construct a Config with given configure file.
+     * @param path The path of configure file.
+     * @throw HdfsBadConfigFoumat
+     */
+    Config(const char *path);
+
+    /**
+     * Parse the configure file.
+     * @throw HdfsBadConfigFoumat
+     */
+    void update(const char *path);
+
+    /**
+     * Get a string with given configure key.
+     * @param key The key of the configure item.
+     * @return The value of configure item.
+     * @throw HdfsConfigNotFound
+     */
+    const char *getString(const char *key) const;
+
+    /**
+     * Get a string with given configure key.
+     * Return the default value def if key is not found.
+     * @param key The key of the configure item.
+     * @param def The default value.
+     * @return The value of configure item.
+     */
+    const char *getString(const char *key, const char *def) const;
+
+    /**
+     * Get a string with given configure key.
+     * @param key The key of the configure item.
+     * @return The value of configure item.
+     * @throw HdfsConfigNotFound
+     */
+    const char *getString(const std::string & key) const;
+
+    /**
+     * Get a string with given configure key.
+     * Return the default value def if key is not found.
+     * @param key The key of the configure item.
+     * @param def The default value.
+     * @return The value of configure item.
+     */
+    const char *getString(const std::string & key,
+                           const std::string & def) const;
+
+    /**
+     * Get a 64 bit integer with given configure key.
+     * @param key The key of the configure item.
+     * @return The value of configure item.
+     * @throw HdfsConfigNotFound
+     */
+    int64_t getInt64(const char *key) const;
+
+    /**
+     * Get a 64 bit integer with given configure key.
+     * Return the default value def if key is not found.
+     * @param key The key of the configure item.
+     * @param def The default value.
+     * @return The value of configure item.
+     */
+    int64_t getInt64(const char *key, int64_t def) const;
+
+    /**
+     * Get a 32 bit integer with given configure key.
+     * @param key The key of the configure item.
+     * @return The value of configure item.
+     * @throw HdfsConfigNotFound
+     */
+    int32_t getInt32(const char *key) const;
+
+    /**
+     * Get a 32 bit integer with given configure key.
+     * Return the default value def if key is not found.
+     * @param key The key of the configure item.
+     * @param def The default value.
+     * @return The value of configure item.
+     */
+    int32_t getInt32(const char *key, int32_t def) const;
+
+    /**
+     * Get a double with given configure key.
+     * @param key The key of the configure item.
+     * @return The value of configure item.
+     * @throw HdfsConfigNotFound
+     */
+    double getDouble(const char *key) const;
+
+    /**
+     * Get a double with given configure key.
+     * Return the default value def if key is not found.
+     * @param key The key of the configure item.
+     * @param def The default value.
+     * @return The value of configure item.
+     */
+    double getDouble(const char *key, double def) const;
+
+    /**
+     * Get a boolean with given configure key.
+     * @param key The key of the configure item.
+     * @return The value of configure item.
+     * @throw HdfsConfigNotFound
+     */
+    bool getBool(const char *key) const;
+
+    /**
+     * Get a boolean with given configure key.
+     * Return the default value def if key is not found.
+     * @param key The key of the configure item.
+     * @param def The default value.
+     * @return The value of configure item.
+     */
+    bool getBool(const char *key, bool def) const;
+
+    /**
+     * Set a configuration item.
+     * @param key The key to set.
+     * @param value The value to set it to.
+     */
+    template<typename T>
+    void set(const char *key, T const & value) {
+        std::stringstream ss;
+        ss << value;
+        kv[key] = ss.str();
+    }
+
+    /**
+     * Get the hash value of this object
+     *
+     * @return The hash value
+     */
+    size_t hash_value() const;
+
+private:
+    std::string path;
+    std::map<std::string, std::string> kv;
+};
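+
+// Illustrative usage (a sketch, not part of the original patch; the file
+// path and keys are examples):
+//
+//   hdfs::Config conf("/etc/hadoop/conf/hdfs-site.xml");
+//   int32_t replication = conf.getInt32("dfs.replication", 3);
+//   conf.set("dfs.client.use.datanode.hostname", true);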
+
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_XMLCONFIG_H_ */