HADOOP-10985. native client: split ndfs.c into meta, file, util, and permission (cmccabe)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HADOOP-10388@1619622 13f79535-47bb-0310-9956-ffa450edef68
diff --git a/hadoop-native-core/CHANGES.txt b/hadoop-native-core/CHANGES.txt
index 14a1518..aee3387 100644
--- a/hadoop-native-core/CHANGES.txt
+++ b/hadoop-native-core/CHANGES.txt
@@ -1,5 +1,7 @@
 HADOOP-10388 Change Log
 
+    HADOOP-10985. native client: split ndfs.c into meta, file, util, and permission (cmccabe)
+
     HADOOP-10877. native client: implement hdfsMove and hdfsCopy (cmccabe)
 
     HADOOP-10725. Implement listStatus and getFileInfo in the native client (cmccabe)
diff --git a/hadoop-native-core/src/main/native/CMakeLists.txt b/hadoop-native-core/src/main/native/CMakeLists.txt
index 619117f..c301c71 100644
--- a/hadoop-native-core/src/main/native/CMakeLists.txt
+++ b/hadoop-native-core/src/main/native/CMakeLists.txt
@@ -113,7 +113,11 @@
     fs/common.c
     fs/copy.c
     fs/fs.c
-    ndfs/ndfs.c
+    ndfs/file.c
+    ndfs/meta.c
+    ndfs/ops.c
+    ndfs/permission.c
+    ndfs/util.c
     ${HDFS_PROTOBUF_SRCS}
 )
 
diff --git a/hadoop-native-core/src/main/native/ndfs/file.c b/hadoop-native-core/src/main/native/ndfs/file.c
new file mode 100644
index 0000000..687b6df
--- /dev/null
+++ b/hadoop-native-core/src/main/native/ndfs/file.c
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/hadoop_err.h"
+#include "common/hconf.h"
+#include "common/net.h"
+#include "common/string.h"
+#include "common/uri.h"
+#include "fs/common.h"
+#include "fs/fs.h"
+#include "ndfs/meta.h"
+#include "ndfs/util.h"
+#include "rpc/messenger.h"
+#include "rpc/proxy.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <uv.h>
+
+/**
+ * Set if the file is read-only... otherwise, the file is assumed to be
+ * write-only.
+ */
+#define NDFS_FILE_FLAG_RO                       0x1
+
+/** This flag is for compatibility with some old test harnesses. */
+#define NDFS_FILE_FLAG_DISABLE_DIRECT_READ      0x2
+
+/** Base class for both read-only and write-only files. */
+struct native_file_base {
+    /** Fields common to all filesystems. */
+    struct hadoop_file_base base;
+
+    /** NDFS file flags. */
+    int flags;
+};
+
+/** A read-only file. */
+struct native_ro_file {
+    struct native_file_base base;
+    uint64_t bytes_read;
+};
+
+/** A write-only file. */
+struct native_wo_file {
+    struct native_file_base base;
+};
+
+int ndfs_file_is_open_for_read(hdfsFile bfile)
+{
+    struct native_file_base *file = (struct native_file_base *)bfile;
+    return !!(file->flags & NDFS_FILE_FLAG_RO);
+}
+
+int ndfs_file_is_open_for_write(hdfsFile bfile)
+{
+    struct native_file_base *file = (struct native_file_base *)bfile;
+    return !(file->flags & NDFS_FILE_FLAG_RO);
+}
+
+int ndfs_file_get_read_statistics(hdfsFile bfile,
+            struct hdfsReadStatistics **out)
+{
+    struct hdfsReadStatistics *stats;
+    struct native_ro_file *file = (struct native_ro_file *)bfile;
+
+    if (!(file->base.flags & NDFS_FILE_FLAG_RO)) {
+        errno = EINVAL;
+        return -1;
+    }
+    stats = calloc(1, sizeof(*stats));
+    if (!stats) {
+        errno = ENOMEM; 
+        return -1;
+    }
+    stats->totalBytesRead = file->bytes_read;
+    *out = stats;
+    return 0;
+}
+
+static struct hadoop_err *ndfs_open_file_for_read(
+        struct native_ro_file **out __attribute__((unused)),
+        struct native_fs *fs __attribute__((unused)),
+        const char *uri __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return NULL;
+}
+
+static struct hadoop_err *ndfs_open_file_for_write(
+        struct native_ro_file **out __attribute__((unused)),
+        struct native_fs *fs __attribute__((unused)),
+        const char *uri __attribute__((unused)),
+        int buffer_size __attribute__((unused)),
+        short replication __attribute__((unused)),
+        tSize block_size __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return NULL;
+}
+
+hdfsFile ndfs_open_file(hdfsFS bfs, const char* uri, int flags, 
+                      int buffer_size, short replication, tSize block_size)
+{
+    struct native_fs *fs = (struct native_fs *)bfs;
+    struct native_ro_file *file = NULL;
+    struct hadoop_err *err;
+    int accmode;
+    char *path = NULL;
+
+    err = build_path(fs, uri, &path);
+    if (err) {
+        goto done;
+    }
+    accmode = flags & O_ACCMODE;
+    if (accmode == O_RDONLY) {
+        err = ndfs_open_file_for_read(&file, fs, path);
+    } else if (accmode == O_WRONLY) {
+        err = ndfs_open_file_for_write(&file, fs, path,
+                        buffer_size, replication, block_size);
+    } else {
+        err = hadoop_lerr_alloc(EINVAL, "cannot open a hadoop file in "
+                "mode 0x%x\n", accmode);
+    }
+done:
+    free(path);
+    return hadoopfs_errno_and_retptr(err, file);
+}
+
+int ndfs_close_file(hdfsFS fs __attribute__((unused)),
+                    hdfsFile bfile __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return -1;
+}
+
+int ndfs_seek(hdfsFS bfs __attribute__((unused)),
+              hdfsFile bfile __attribute__((unused)),
+              tOffset desiredPos __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return -1;
+}
+
+tOffset ndfs_tell(hdfsFS bfs __attribute__((unused)),
+                  hdfsFile bfile __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return -1;
+}
+
+tSize ndfs_read(hdfsFS bfs __attribute__((unused)),
+                       hdfsFile bfile __attribute__((unused)),
+                       void *buffer __attribute__((unused)),
+                       tSize length __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return -1;
+}
+
+tSize ndfs_pread(hdfsFS bfs __attribute__((unused)),
+            hdfsFile bfile __attribute__((unused)),
+            tOffset position __attribute__((unused)),
+            void* buffer __attribute__((unused)),
+            tSize length __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return -1;
+}
+
+tSize ndfs_write(hdfsFS bfs __attribute__((unused)),
+            hdfsFile bfile __attribute__((unused)),
+            const void* buffer __attribute__((unused)),
+            tSize length __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return -1;
+}
+
+int ndfs_flush(hdfsFS bfs __attribute__((unused)),
+               hdfsFile bfile __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return -1;
+}
+
+int ndfs_hflush(hdfsFS bfs __attribute__((unused)),
+                hdfsFile bfile __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return -1;
+}
+
+int ndfs_hsync(hdfsFS bfs __attribute__((unused)),
+               hdfsFile bfile __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return -1;
+}
+
+int ndfs_available(hdfsFS bfs __attribute__((unused)),
+                   hdfsFile bfile __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return -1;
+}
+
+struct hadoopRzBuffer* ndfs_read_zero(
+            hdfsFile bfile __attribute__((unused)),
+            struct hadoopRzOptions *opts __attribute__((unused)),
+            int32_t maxLength __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return NULL;
+}
+
+void ndfs_rz_buffer_free(hdfsFile bfile __attribute__((unused)),
+                struct hadoopRzBuffer *buffer __attribute__((unused)))
+{
+}
+
+int ndfs_file_uses_direct_read(hdfsFile bfile)
+{
+    // Direct reads are used unless the compatibility flag has been set.  The
+    // flag exists only so that old test harnesses designed to test jniFS can
+    // run against NDFS; it has no effect, since all NDFS reads are direct.
+    struct native_file_base *file = (struct native_file_base *)bfile;
+    return (!(file->flags & NDFS_FILE_FLAG_DISABLE_DIRECT_READ));
+}
+
+void ndfs_file_disable_direct_read(hdfsFile bfile)
+{
+    struct native_file_base *file = (struct native_file_base *)bfile;
+    file->flags |= NDFS_FILE_FLAG_DISABLE_DIRECT_READ;
+}
+
+char***
+ndfs_get_hosts(hdfsFS bfs __attribute__((unused)),
+               const char* path __attribute__((unused)),
+               tOffset start __attribute__((unused)),
+               tOffset length __attribute__((unused)))
+{
+    errno = ENOTSUP;
+    return NULL;
+}
+
+// vim: ts=4:sw=4:tw=79:et
diff --git a/hadoop-native-core/src/main/native/ndfs/file.h b/hadoop-native-core/src/main/native/ndfs/file.h
new file mode 100644
index 0000000..79cb87d
--- /dev/null
+++ b/hadoop-native-core/src/main/native/ndfs/file.h
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_CORE_NDFS_FILE_H
+#define HADOOP_CORE_NDFS_FILE_H
+
+#include "fs/hdfs.h"
+
+#include <stdint.h>
+
+int ndfs_file_is_open_for_read(hdfsFile bfile);
+int ndfs_file_is_open_for_write(hdfsFile bfile);
+int ndfs_file_get_read_statistics(hdfsFile bfile,
+            struct hdfsReadStatistics **out);
+hdfsFile ndfs_open_file(hdfsFS bfs, const char* uri, int flags, 
+                      int buffer_size, short replication, tSize block_size);
+int ndfs_close_file(hdfsFS fs, hdfsFile bfile);
+int ndfs_seek(hdfsFS bfs, hdfsFile bfile, tOffset desiredPos);
+tOffset ndfs_tell(hdfsFS bfs, hdfsFile bfile);
+tSize ndfs_read(hdfsFS bfs, hdfsFile bfile, void *buffer, tSize length);
+tSize ndfs_pread(hdfsFS bfs, hdfsFile bfile, tOffset position,
+                 void* buffer, tSize length);
+tSize ndfs_write(hdfsFS bfs, hdfsFile bfile, const void* buffer, tSize length);
+int ndfs_flush(hdfsFS bfs, hdfsFile bfile);
+int ndfs_hflush(hdfsFS bfs, hdfsFile bfile);
+int ndfs_hsync(hdfsFS bfs, hdfsFile bfile);
+int ndfs_available(hdfsFS bfs, hdfsFile bfile);
+struct hadoopRzBuffer* ndfs_read_zero(hdfsFile bfile,
+            struct hadoopRzOptions *opts, int32_t maxLength);
+void ndfs_rz_buffer_free(hdfsFile bfile, struct hadoopRzBuffer *buffer);
+int ndfs_file_uses_direct_read(hdfsFile bfile);
+void ndfs_file_disable_direct_read(hdfsFile bfile);
+char*** ndfs_get_hosts(hdfsFS bfs, const char* path,
+                       tOffset start, tOffset length);
+
+#endif
+
+// vim: ts=4:sw=4:tw=79:et
diff --git a/hadoop-native-core/src/main/native/ndfs/ndfs.c b/hadoop-native-core/src/main/native/ndfs/meta.c
similarity index 67%
rename from hadoop-native-core/src/main/native/ndfs/ndfs.c
rename to hadoop-native-core/src/main/native/ndfs/meta.c
index 773baad..33f55bc 100644
--- a/hadoop-native-core/src/main/native/ndfs/ndfs.c
+++ b/hadoop-native-core/src/main/native/ndfs/meta.c
@@ -23,6 +23,9 @@
 #include "common/uri.h"
 #include "fs/common.h"
 #include "fs/fs.h"
+#include "ndfs/meta.h"
+#include "ndfs/permission.h"
+#include "ndfs/util.h"
 #include "protobuf/ClientNamenodeProtocol.call.h"
 #include "protobuf/hdfs.pb-c.h.s"
 #include "rpc/messenger.h"
@@ -37,8 +40,6 @@
 
 #define DEFAULT_NN_PORT 8020
 
-#define CLIENT_NN_PROTOCOL "org.apache.hadoop.hdfs.protocol.ClientProtocol"
-
 #define FS_PERMISSIONS_UMASK_KEY "fs.permissions.umask-mode"
 
 #define FS_PERMISSIONS_UMASK_DEFAULT "022"
@@ -49,74 +50,6 @@
 #define DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT \
     (10LL * 60LL * 1000LL)
 
-struct native_fs {
-    /** Fields common to all filesystems. */
-    struct hadoop_fs_base base;
-
-    /**
-     * Address of the namenode.
-     * TODO: implement HA failover
-     * TODO: implement IPv6
-     */
-    struct sockaddr_in nn_addr;
-
-    /** The messenger used to perform RPCs. */
-    struct hrpc_messenger *msgr;
-
-    /** The default block size obtained from getServerDefaults. */
-    int64_t default_block_size;
-
-    /** Prefix to use when building URLs. */
-    char *url_prefix;
-
-    /** URI that was used when connecting. */
-    struct hadoop_uri *conn_uri;
-
-    /**
-     * A dynamically allocated working directory which will be prepended to
-     * all relative paths.
-     */
-    struct hadoop_uri *working_uri;
-
-    /** Lock which protects the working_uri. */
-    uv_mutex_t working_uri_lock;
-
-    /** Umask to use when creating files */
-    uint32_t umask;
-
-    /** How long to wait, in nanoseconds, before re-trying a dead datanode. */
-    uint64_t dead_dn_timeout_ns;
-};
-
-/**
- * Set if the file is read-only... otherwise, the file is assumed to be
- * write-only.
- */
-#define NDFS_FILE_FLAG_RO                       0x1
-
-/** This flag is for compatibility with some old test harnesses. */
-#define NDFS_FILE_FLAG_DISABLE_DIRECT_READ      0x2
-
-/** Base class for both read-only and write-only files. */
-struct native_file_base {
-    /** Fields common to all filesystems. */
-    struct hadoop_file_base base;
-
-    /** NDFS file flags. */
-    int flags;
-};
-
-/** A read-only file. */
-struct native_ro_file {
-    struct native_file_base base;
-    uint64_t bytes_read;
-};
-
-/** A write-only file. */
-struct native_wo_file {
-    struct native_file_base base;
-};
-
 /** Whole-filesystem stats sent back from the NameNode. */
 struct hadoop_vfs_stats {
     int64_t capacity;
@@ -132,95 +65,13 @@
     uint64_t blocksize;
 };
 
-static hdfsFileInfo *ndfs_get_path_info(hdfsFS bfs, const char* uri);
-
 static struct hadoop_err *ndfs_connect_setup_conf(struct native_fs *fs,
                                                   struct hconf *hconf);
 
-static void ndfs_free(struct native_fs *fs);
-
-struct hadoop_err *populate_file_info(struct file_info *info,
+static struct hadoop_err *populate_file_info(struct file_info *info,
                     HdfsFileStatusProto *status, const char *prefix);
 
-static void ndfs_nn_proxy_init(struct native_fs *fs, struct hrpc_proxy *proxy)
-{
-    hrpc_proxy_init(proxy, fs->msgr, &fs->nn_addr, CLIENT_NN_PROTOCOL,
-                    fs->conn_uri->user_info);
-}
-
-/**
- * Construct a canonical path from a URI.
- *
- * @param fs        The filesystem.
- * @param uri       The URI.
- * @param out       (out param) the canonical path.
- *
- * @return          NULL on success; the error otherwise.
- */
-static struct hadoop_err *build_path(struct native_fs *fs, const char *uri_str,
-                                     char **out)
-{
-    struct hadoop_err *err = NULL;
-    struct hadoop_uri *uri = NULL;
-
-    uv_mutex_lock(&fs->working_uri_lock);
-    err = hadoop_uri_parse(uri_str, fs->working_uri, &uri, H_URI_PARSE_PATH);
-    if (err)
-        goto done;
-    // TODO: check URI scheme and user against saved values?
-    if (uri->path[0]) {
-        *out = strdup(uri->path);
-    } else {
-        // As a special case, when the URI given has an empty path, we assume that
-        // we want the current working directory.  This is to allow things like
-        // hdfs://mynamenode to map to the current working directory, as they do in
-        // Hadoop.  Note that this is different than hdfs://mynamenode/ (note the
-        // trailing slash) which maps to the root directory.
-        *out = strdup(fs->working_uri->path);
-    }
-    if (!*out) {
-        err = hadoop_lerr_alloc(ENOMEM, "build_path: out of memory.");
-        goto done;
-    }
-    err = NULL;
-
-done:
-    uv_mutex_unlock(&fs->working_uri_lock);
-    hadoop_uri_free(uri);
-    return err;
-}
-
-static int ndfs_file_is_open_for_read(hdfsFile bfile)
-{
-    struct native_file_base *file = (struct native_file_base *)bfile;
-    return !!(file->flags & NDFS_FILE_FLAG_RO);
-}
-
-static int ndfs_file_is_open_for_write(hdfsFile bfile)
-{
-    struct native_file_base *file = (struct native_file_base *)bfile;
-    return !(file->flags & NDFS_FILE_FLAG_RO);
-}
-
-static int ndfs_file_get_read_statistics(hdfsFile bfile,
-            struct hdfsReadStatistics **out)
-{
-    struct hdfsReadStatistics *stats;
-    struct native_ro_file *file = (struct native_ro_file *)bfile;
-
-    if (!(file->base.flags & NDFS_FILE_FLAG_RO)) {
-        errno = EINVAL;
-        return -1;
-    }
-    stats = calloc(1, sizeof(*stats));
-    if (!stats) {
-        errno = ENOMEM; 
-        return -1;
-    }
-    stats->totalBytesRead = file->bytes_read;
-    *out = stats;
-    return 0;
-}
+static void ndfs_free(struct native_fs *fs);
 
 static struct hadoop_err *ndfs_get_server_defaults(struct native_fs *fs,
             struct ndfs_server_defaults *defaults)
@@ -410,24 +261,6 @@
     return NULL; 
 }
 
-static struct hadoop_err *parse_permission(const char *str, uint32_t *perm)
-{
-    if (strspn(str, " 01234567") != strlen(str)) {
-        // TODO: support permission strings as in PermissionParser.java
-        return hadoop_lerr_alloc(ENOTSUP, "parse_permission(%s): "
-            "can't parse non-octal permissions (yet)", str);
-    }
-    errno = 0;
-    *perm = strtol(str, NULL, 8);
-    if (errno) {
-        int ret = errno;
-        return hadoop_lerr_alloc(EINVAL, "parse_permission(%s): "
-                "failed to parse this octal string: %s",
-                str, terror(ret));
-    }
-    return NULL;
-}
-
 /**
  * Configure the native file system using the Hadoop configuration.
  *
@@ -472,7 +305,7 @@
     free(fs);
 }
 
-static int ndfs_disconnect(hdfsFS bfs)
+int ndfs_disconnect(hdfsFS bfs)
 {
     struct native_fs *fs = (struct native_fs*)bfs;
 
@@ -480,63 +313,7 @@
     return 0;
 }
 
-static struct hadoop_err *ndfs_open_file_for_read(
-        struct native_ro_file **out __attribute__((unused)),
-        struct native_fs *fs __attribute__((unused)),
-        const char *uri __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return NULL;
-}
-
-static struct hadoop_err *ndfs_open_file_for_write(
-        struct native_ro_file **out __attribute__((unused)),
-        struct native_fs *fs __attribute__((unused)),
-        const char *uri __attribute__((unused)),
-        int buffer_size __attribute__((unused)),
-        short replication __attribute__((unused)),
-        tSize block_size __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return NULL;
-}
-
-static hdfsFile ndfs_open_file(hdfsFS bfs, const char* uri, int flags, 
-                      int buffer_size, short replication, tSize block_size)
-{
-    struct native_fs *fs = (struct native_fs *)bfs;
-    struct native_ro_file *file = NULL;
-    struct hadoop_err *err;
-    int accmode;
-    char *path = NULL;
-
-    err = build_path(fs, uri, &path);
-    if (err) {
-        goto done;
-    }
-    accmode = flags & O_ACCMODE;
-    if (accmode == O_RDONLY) {
-        err = ndfs_open_file_for_read(&file, fs, path);
-    } else if (accmode == O_WRONLY) {
-        err = ndfs_open_file_for_write(&file, fs, path,
-                        buffer_size, replication, block_size);
-    } else {
-        err = hadoop_lerr_alloc(EINVAL, "cannot open a hadoop file in "
-                "mode 0x%x\n", accmode);
-    }
-done:
-    free(path);
-    return hadoopfs_errno_and_retptr(err, file);
-}
-
-static int ndfs_close_file(hdfsFS fs __attribute__((unused)),
-                           hdfsFile bfile __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return -1;
-}
-
-static int ndfs_file_exists(hdfsFS bfs, const char *uri)
+int ndfs_file_exists(hdfsFS bfs, const char *uri)
 {
     static hdfsFileInfo *info;
 
@@ -549,78 +326,7 @@
     return 0;
 }
 
-static int ndfs_seek(hdfsFS bfs __attribute__((unused)),
-                     hdfsFile bfile __attribute__((unused)),
-                     tOffset desiredPos __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return -1;
-}
-
-static tOffset ndfs_tell(hdfsFS bfs __attribute__((unused)),
-                         hdfsFile bfile __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return -1;
-}
-
-static tSize ndfs_read(hdfsFS bfs __attribute__((unused)),
-                       hdfsFile bfile __attribute__((unused)),
-                       void *buffer __attribute__((unused)),
-                       tSize length __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return -1;
-}
-
-static tSize ndfs_pread(hdfsFS bfs __attribute__((unused)),
-            hdfsFile bfile __attribute__((unused)),
-            tOffset position __attribute__((unused)),
-            void* buffer __attribute__((unused)),
-            tSize length __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return -1;
-}
-
-static tSize ndfs_write(hdfsFS bfs __attribute__((unused)),
-            hdfsFile bfile __attribute__((unused)),
-            const void* buffer __attribute__((unused)),
-            tSize length __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return -1;
-}
-
-static int ndfs_flush(hdfsFS bfs __attribute__((unused)),
-                      hdfsFile bfile __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return -1;
-}
-
-static int ndfs_hflush(hdfsFS bfs __attribute__((unused)),
-                      hdfsFile bfile __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return -1;
-}
-
-static int ndfs_hsync(hdfsFS bfs __attribute__((unused)),
-                      hdfsFile bfile __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return -1;
-}
-
-static int ndfs_available(hdfsFS bfs __attribute__((unused)),
-                          hdfsFile bfile __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return -1;
-}
-
-static int ndfs_unlink(struct hdfs_internal *bfs,
+int ndfs_unlink(struct hdfs_internal *bfs,
                 const char *uri, int recursive)
 {
     struct native_fs *fs = (struct native_fs*)bfs;
@@ -655,7 +361,7 @@
     return hadoopfs_errno_and_retcode(err);
 }
 
-static int ndfs_rename(hdfsFS bfs, const char *src_uri, const char *dst_uri)
+int ndfs_rename(hdfsFS bfs, const char *src_uri, const char *dst_uri)
 {
     struct native_fs *fs = (struct native_fs*)bfs;
     struct hadoop_err *err = NULL;
@@ -690,8 +396,7 @@
     return hadoopfs_errno_and_retcode(err);
 }
 
-static char* ndfs_get_working_directory(hdfsFS bfs, char *buffer,
-                                       size_t bufferSize)
+char* ndfs_get_working_directory(hdfsFS bfs, char *buffer, size_t bufferSize)
 {
     size_t len;
     struct native_fs *fs = (struct native_fs *)bfs;
@@ -712,7 +417,7 @@
     return hadoopfs_errno_and_retptr(err, buffer);
 }
 
-static int ndfs_set_working_directory(hdfsFS bfs, const char* uri_str)
+int ndfs_set_working_directory(hdfsFS bfs, const char* uri_str)
 {
     struct native_fs *fs = (struct native_fs *)bfs;
     struct hadoop_err *err = NULL;
@@ -734,7 +439,7 @@
     return hadoopfs_errno_and_retcode(err);
 }
 
-static int ndfs_mkdir(hdfsFS bfs, const char* uri)
+int ndfs_mkdir(hdfsFS bfs, const char* uri)
 {
     struct native_fs *fs = (struct native_fs *)bfs;
     struct hadoop_err *err = NULL;
@@ -774,8 +479,7 @@
     return hadoopfs_errno_and_retcode(err);
 }
 
-static int ndfs_set_replication(hdfsFS bfs, const char* uri,
-                               int16_t replication)
+int ndfs_set_replication(hdfsFS bfs, const char* uri, int16_t replication)
 {
     struct native_fs *fs = (struct native_fs *)bfs;
     struct hadoop_err *err = NULL;
@@ -809,7 +513,7 @@
     return hadoopfs_errno_and_retcode(err);
 }
 
-struct hadoop_err *ndfs_list_partial(struct native_fs *fs,
+static struct hadoop_err *ndfs_list_partial(struct native_fs *fs,
         const char *path, const char *prev, uint32_t *entries_len,
         hdfsFileInfo **entries, uint32_t *remaining)
 {
@@ -868,7 +572,7 @@
     return err;
 }
 
-struct hadoop_err *populate_file_info(struct file_info *info,
+static struct hadoop_err *populate_file_info(struct file_info *info,
                     HdfsFileStatusProto *status, const char *prefix)
 {
     if (status->filetype == IS_DIR) {
@@ -909,8 +613,7 @@
                             info->mName);
 }
 
-static hdfsFileInfo* ndfs_list_directory(hdfsFS bfs,
-                    const char* uri, int *numEntries)
+hdfsFileInfo* ndfs_list_directory(hdfsFS bfs, const char* uri, int *numEntries)
 {
     struct native_fs *fs = (struct native_fs *)bfs;
     struct hadoop_err *err = NULL;
@@ -953,7 +656,7 @@
     return hadoopfs_errno_and_retptr(err, entries);
 }
 
-static hdfsFileInfo *ndfs_get_path_info(hdfsFS bfs, const char* uri)
+hdfsFileInfo *ndfs_get_path_info(hdfsFS bfs, const char* uri)
 {
     struct native_fs *fs = (struct native_fs*)bfs;
     struct hadoop_err *err = NULL;
@@ -1011,24 +714,13 @@
     return hadoopfs_errno_and_retptr(err, info);
 }
 
-char***
-ndfs_get_hosts(hdfsFS bfs __attribute__((unused)),
-               const char* path __attribute__((unused)),
-               tOffset start __attribute__((unused)),
-               tOffset length __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return NULL;
-}
-
-static tOffset ndfs_get_default_block_size(hdfsFS bfs)
+tOffset ndfs_get_default_block_size(hdfsFS bfs)
 {
     struct native_fs *fs = (struct native_fs *)bfs;
     return fs->default_block_size;
 }
 
-static tOffset ndfs_get_default_block_size_at_path(hdfsFS bfs,
-                    const char *uri)
+tOffset ndfs_get_default_block_size_at_path(hdfsFS bfs, const char *uri)
 {
     struct native_fs *fs = (struct native_fs *)bfs;
     struct hadoop_err *err = NULL;
@@ -1062,7 +754,7 @@
     return ret;
 }
 
-static struct hadoop_err *ndfs_statvfs(struct hadoop_fs_base *hfs,
+struct hadoop_err *ndfs_statvfs(struct hadoop_fs_base *hfs,
         struct hadoop_vfs_stats *stats)
 {
     struct native_fs *fs = (struct native_fs*)hfs;
@@ -1091,7 +783,7 @@
     return err;
 }
 
-static tOffset ndfs_get_capacity(hdfsFS bfs)
+tOffset ndfs_get_capacity(hdfsFS bfs)
 {
     struct hadoop_err *err;
     struct hadoop_vfs_stats stats;
@@ -1102,7 +794,7 @@
     return stats.capacity;
 }
 
-static tOffset ndfs_get_used(hdfsFS bfs)
+tOffset ndfs_get_used(hdfsFS bfs)
 {
     struct hadoop_err *err;
     struct hadoop_vfs_stats stats;
@@ -1113,7 +805,7 @@
     return stats.used;
 }
 
-static int ndfs_chown(hdfsFS bfs, const char* uri,
+int ndfs_chown(hdfsFS bfs, const char* uri,
                       const char *user, const char *group)
 {
     struct native_fs *fs = (struct native_fs *)bfs;
@@ -1144,7 +836,7 @@
     return hadoopfs_errno_and_retcode(err);
 }
 
-static int ndfs_chmod(hdfsFS bfs, const char* uri, short mode)
+int ndfs_chmod(hdfsFS bfs, const char* uri, short mode)
 {
     struct native_fs *fs = (struct native_fs *)bfs;
     FsPermissionProto perm = FS_PERMISSION_PROTO__INIT;
@@ -1175,8 +867,7 @@
     return hadoopfs_errno_and_retcode(err);
 }
 
-static int ndfs_utime(hdfsFS bfs, const char* uri,
-                      int64_t mtime, int64_t atime)
+int ndfs_utime(hdfsFS bfs, const char* uri, int64_t mtime, int64_t atime)
 {
     struct native_fs *fs = (struct native_fs *)bfs;
     SetTimesRequestProto req = SET_TIMES_REQUEST_PROTO__INIT ;
@@ -1221,78 +912,4 @@
     return hadoopfs_errno_and_retcode(err);
 }
 
-static struct hadoopRzBuffer* ndfs_read_zero(
-            hdfsFile bfile __attribute__((unused)),
-            struct hadoopRzOptions *opts __attribute__((unused)),
-            int32_t maxLength __attribute__((unused)))
-{
-    errno = ENOTSUP;
-    return NULL;
-}
-
-static void ndfs_rz_buffer_free(hdfsFile bfile __attribute__((unused)),
-                struct hadoopRzBuffer *buffer __attribute__((unused)))
-{
-}
-
-int ndfs_file_uses_direct_read(hdfsFile bfile)
-{
-    // Set the 'disable direct reads' flag so that old test harnesses designed
-    // to test jniFS will run against NDFS.  The flag doesn't do anything,
-    // since all reads are always direct in NDFS.
-    struct native_file_base *file = (struct native_file_base *)bfile;
-    return (!(file->flags & NDFS_FILE_FLAG_DISABLE_DIRECT_READ));
-}
-
-void ndfs_file_disable_direct_read(hdfsFile bfile __attribute__((unused)))
-{
-    struct native_file_base *file = (struct native_file_base *)bfile;
-    file->flags |= NDFS_FILE_FLAG_DISABLE_DIRECT_READ;
-}
-
-const struct hadoop_fs_ops g_ndfs_ops = {
-    .name = "ndfs",
-    .file_is_open_for_read = ndfs_file_is_open_for_read,
-    .file_is_open_for_write = ndfs_file_is_open_for_write,
-    .get_read_statistics = ndfs_file_get_read_statistics,
-    .connect = ndfs_connect,
-    .disconnect = ndfs_disconnect,
-    .open = ndfs_open_file,
-    .close = ndfs_close_file,
-    .exists = ndfs_file_exists,
-    .seek = ndfs_seek,
-    .tell = ndfs_tell,
-    .read = ndfs_read,
-    .pread = ndfs_pread,
-    .write = ndfs_write,
-    .flush = ndfs_flush,
-    .hflush = ndfs_hflush,
-    .hsync = ndfs_hsync,
-    .available = ndfs_available,
-    .copy = NULL,
-    .move = NULL,
-    .unlink = ndfs_unlink,
-    .rename = ndfs_rename,
-    .get_working_directory = ndfs_get_working_directory,
-    .set_working_directory = ndfs_set_working_directory,
-    .mkdir = ndfs_mkdir,
-    .set_replication = ndfs_set_replication,
-    .list_directory = ndfs_list_directory,
-    .get_path_info = ndfs_get_path_info,
-    .get_hosts = ndfs_get_hosts,
-    .get_default_block_size = ndfs_get_default_block_size,
-    .get_default_block_size_at_path = ndfs_get_default_block_size_at_path,
-    .get_capacity = ndfs_get_capacity,
-    .get_used = ndfs_get_used,
-    .chown = ndfs_chown,
-    .chmod = ndfs_chmod,
-    .utime = ndfs_utime,
-    .read_zero = ndfs_read_zero,
-    .rz_buffer_free = ndfs_rz_buffer_free,
-
-    // test
-    .file_uses_direct_read = ndfs_file_uses_direct_read,
-    .file_disable_direct_read = ndfs_file_disable_direct_read,
-};
-
 // vim: ts=4:sw=4:tw=79:et
diff --git a/hadoop-native-core/src/main/native/ndfs/meta.h b/hadoop-native-core/src/main/native/ndfs/meta.h
new file mode 100644
index 0000000..d43fee8
--- /dev/null
+++ b/hadoop-native-core/src/main/native/ndfs/meta.h
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_CORE_NDFS_META_H
+#define HADOOP_CORE_NDFS_META_H
+
+#include <netinet/in.h>
+#include <uv.h>
+
+struct hadoop_vfs_stats;
+
+struct native_fs {
+    /** Fields common to all filesystems. */
+    struct hadoop_fs_base base;
+
+    /**
+     * Address of the namenode.
+     * TODO: implement HA failover
+     * TODO: implement IPv6
+     */
+    struct sockaddr_in nn_addr;
+
+    /** The messenger used to perform RPCs. */
+    struct hrpc_messenger *msgr;
+
+    /** The default block size obtained from getServerDefaults. */
+    int64_t default_block_size;
+
+    /** Prefix to use when building URLs. */
+    char *url_prefix;
+
+    /** URI that was used when connecting. */
+    struct hadoop_uri *conn_uri;
+
+    /**
+     * A dynamically allocated working directory which will be prepended to
+     * all relative paths.
+     */
+    struct hadoop_uri *working_uri;
+
+    /** Lock which protects the working_uri. */
+    uv_mutex_t working_uri_lock;
+
+    /** Umask to use when creating files */
+    uint32_t umask;
+
+    /** How long to wait, in nanoseconds, before re-trying a dead datanode. */
+    uint64_t dead_dn_timeout_ns;
+};
+
+struct hadoop_err *ndfs_connect(struct hdfsBuilder *hdfs_bld,
+                                struct hdfs_internal **out);
+int ndfs_disconnect(hdfsFS bfs);
+int ndfs_file_exists(hdfsFS bfs, const char *uri);
+int ndfs_unlink(struct hdfs_internal *bfs,
+                const char *uri, int recursive);
+int ndfs_rename(hdfsFS bfs, const char *src_uri, const char *dst_uri);
+char* ndfs_get_working_directory(hdfsFS bfs, char *buffer, size_t bufferSize);
+int ndfs_set_working_directory(hdfsFS bfs, const char* uri_str);
+int ndfs_mkdir(hdfsFS bfs, const char* uri);
+int ndfs_set_replication(hdfsFS bfs, const char* uri, int16_t replication);
+hdfsFileInfo* ndfs_list_directory(hdfsFS bfs, const char* uri, int *numEntries);
+hdfsFileInfo *ndfs_get_path_info(hdfsFS bfs, const char* uri);
+tOffset ndfs_get_default_block_size(hdfsFS bfs);
+tOffset ndfs_get_default_block_size_at_path(hdfsFS bfs, const char *uri);
+struct hadoop_err *ndfs_statvfs(struct hadoop_fs_base *hfs,
+        struct hadoop_vfs_stats *stats);
+tOffset ndfs_get_capacity(hdfsFS bfs);
+tOffset ndfs_get_used(hdfsFS bfs);
+int ndfs_chown(hdfsFS bfs, const char* uri, const char *user,
+               const char *group);
+int ndfs_chmod(hdfsFS bfs, const char* uri, short mode);
+int ndfs_utime(hdfsFS bfs, const char* uri, int64_t mtime, int64_t atime);
+
+#endif
+
+// vim: ts=4:sw=4:tw=79:et
diff --git a/hadoop-native-core/src/main/native/ndfs/ops.c b/hadoop-native-core/src/main/native/ndfs/ops.c
new file mode 100644
index 0000000..b863ab9
--- /dev/null
+++ b/hadoop-native-core/src/main/native/ndfs/ops.c
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fs/fs.h"
+#include "ndfs/file.h"
+#include "ndfs/meta.h"
+
+const struct hadoop_fs_ops g_ndfs_ops = {
+    .name = "ndfs",
+    .file_is_open_for_read = ndfs_file_is_open_for_read,
+    .file_is_open_for_write = ndfs_file_is_open_for_write,
+    .get_read_statistics = ndfs_file_get_read_statistics,
+    .connect = ndfs_connect,
+    .disconnect = ndfs_disconnect,
+    .open = ndfs_open_file,
+    .close = ndfs_close_file,
+    .exists = ndfs_file_exists,
+    .seek = ndfs_seek,
+    .tell = ndfs_tell,
+    .read = ndfs_read,
+    .pread = ndfs_pread,
+    .write = ndfs_write,
+    .flush = ndfs_flush,
+    .hflush = ndfs_hflush,
+    .hsync = ndfs_hsync,
+    .available = ndfs_available,
+    .copy = NULL,
+    .move = NULL,
+    .unlink = ndfs_unlink,
+    .rename = ndfs_rename,
+    .get_working_directory = ndfs_get_working_directory,
+    .set_working_directory = ndfs_set_working_directory,
+    .mkdir = ndfs_mkdir,
+    .set_replication = ndfs_set_replication,
+    .list_directory = ndfs_list_directory,
+    .get_path_info = ndfs_get_path_info,
+    .get_hosts = ndfs_get_hosts,
+    .get_default_block_size = ndfs_get_default_block_size,
+    .get_default_block_size_at_path = ndfs_get_default_block_size_at_path,
+    .get_capacity = ndfs_get_capacity,
+    .get_used = ndfs_get_used,
+    .chown = ndfs_chown,
+    .chmod = ndfs_chmod,
+    .utime = ndfs_utime,
+    .read_zero = ndfs_read_zero,
+    .rz_buffer_free = ndfs_rz_buffer_free,
+
+    // test
+    .file_uses_direct_read = ndfs_file_uses_direct_read,
+    .file_disable_direct_read = ndfs_file_disable_direct_read,
+};
+
+// vim: ts=4:sw=4:tw=79:et
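The ops table is what stitches the split files back together: ops.c now owns
the struct hadoop_fs_ops vtable through which the fs layer dispatches into
NDFS.  A minimal dispatch sketch follows (illustrative only; the extern
declaration and the call site are hypothetical, and a connected hdfsFS handle
is assumed):

#include "fs/fs.h"

/* Hypothetical: however fs/fs.c actually picks up the table, the effect is a
 * plain indirect call through the vtable. */
extern const struct hadoop_fs_ops g_ndfs_ops;

static tOffset capacity_via_vtable(hdfsFS fs)
{
    const struct hadoop_fs_ops *ops = &g_ndfs_ops;
    return ops->get_capacity(fs);    /* resolves to ndfs_get_capacity() */
}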
diff --git a/hadoop-native-core/src/main/native/ndfs/permission.c b/hadoop-native-core/src/main/native/ndfs/permission.c
new file mode 100644
index 0000000..9e21a73
--- /dev/null
+++ b/hadoop-native-core/src/main/native/ndfs/permission.c
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/hadoop_err.h"
+#include "ndfs/permission.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct hadoop_err *parse_permission(const char *str, uint32_t *perm)
+{
+    if (strspn(str, " 01234567") != strlen(str)) {
+        // TODO: support permission strings as in PermissionParser.java (see
+        // HADOOP-10981)
+        return hadoop_lerr_alloc(ENOTSUP, "parse_permission(%s): "
+            "can't parse non-octal permissions (yet)", str);
+    }
+    errno = 0;
+    *perm = strtol(str, NULL, 8);
+    if (errno) {
+        int ret = errno;
+        return hadoop_lerr_alloc(EINVAL, "parse_permission(%s): "
+                "failed to parse this octal string: %s",
+                str, terror(ret));
+    }
+    return NULL;
+}
+
+// vim: ts=4:sw=4:tw=79:et
diff --git a/hadoop-native-core/src/main/native/ndfs/permission.h b/hadoop-native-core/src/main/native/ndfs/permission.h
new file mode 100644
index 0000000..49a7d5e
--- /dev/null
+++ b/hadoop-native-core/src/main/native/ndfs/permission.h
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_CORE_NDFS_PERMISSION_H
+#define HADOOP_CORE_NDFS_PERMISSION_H
+
+#include <stdint.h>
+
+struct hadoop_err;
+
+/**
+ * Parse a Hadoop permission string into a numeric value.
+ *
+ * @param str       The string to parse.
+ * @param perm      (out param) the numeric value.
+ *
+ * @return          NULL on success; the error otherwise. 
+ */
+struct hadoop_err *parse_permission(const char *str, uint32_t *perm);
+
+#endif
+
+// vim: ts=4:sw=4:tw=79:et
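A minimal usage sketch for parse_permission() (the caller name and the "022"
fallback are illustrative; "022" mirrors FS_PERMISSIONS_UMASK_DEFAULT in
meta.c):

#include "ndfs/permission.h"

#include <stdint.h>

/* Illustrative only: turn a configured umask string into its numeric form. */
static struct hadoop_err *resolve_umask(const char *umask_str, uint32_t *umask)
{
    if (!umask_str)
        umask_str = "022";
    /* Octal strings such as "022" or "0644" parse; symbolic modes like
     * "u=rwx,g=rx,o=" currently come back as an ENOTSUP error (see the
     * TODO in parse_permission). */
    return parse_permission(umask_str, umask);
}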
diff --git a/hadoop-native-core/src/main/native/ndfs/util.c b/hadoop-native-core/src/main/native/ndfs/util.c
new file mode 100644
index 0000000..da988db
--- /dev/null
+++ b/hadoop-native-core/src/main/native/ndfs/util.c
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/hadoop_err.h"
+#include "common/string.h"
+#include "common/uri.h"
+#include "fs/common.h"
+#include "fs/fs.h"
+#include "ndfs/meta.h"
+#include "protobuf/ClientNamenodeProtocol.call.h"
+#include "protobuf/hdfs.pb-c.h.s"
+#include "rpc/messenger.h"
+#include "rpc/proxy.h"
+
+#include <netinet/in.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <uv.h>
+
+#define CLIENT_NN_PROTOCOL "org.apache.hadoop.hdfs.protocol.ClientProtocol"
+
+void ndfs_nn_proxy_init(struct native_fs *fs, struct hrpc_proxy *proxy)
+{
+    hrpc_proxy_init(proxy, fs->msgr, &fs->nn_addr, CLIENT_NN_PROTOCOL,
+                    fs->conn_uri->user_info);
+}
+
+struct hadoop_err *build_path(struct native_fs *fs, const char *uri_str,
+                                     char **out)
+{
+    struct hadoop_err *err = NULL;
+    struct hadoop_uri *uri = NULL;
+
+    uv_mutex_lock(&fs->working_uri_lock);
+    err = hadoop_uri_parse(uri_str, fs->working_uri, &uri, H_URI_PARSE_PATH);
+    if (err)
+        goto done;
+    // TODO: check URI scheme and user against saved values?
+    if (uri->path[0]) {
+        *out = strdup(uri->path);
+    } else {
+        // As a special case, when the URI given has an empty path, we assume that
+        // we want the current working directory.  This is to allow things like
+        // hdfs://mynamenode to map to the current working directory, as they do in
+        // Hadoop.  Note that this is different than hdfs://mynamenode/ (note the
+        // trailing slash) which maps to the root directory.
+        *out = strdup(fs->working_uri->path);
+    }
+    if (!*out) {
+        err = hadoop_lerr_alloc(ENOMEM, "build_path: out of memory.");
+        goto done;
+    }
+    err = NULL;
+
+done:
+    uv_mutex_unlock(&fs->working_uri_lock);
+    hadoop_uri_free(uri);
+    return err;
+}
+
+// vim: ts=4:sw=4:tw=79:et
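A behavior sketch for the working-directory special case documented in
build_path() above (illustrative; the host name and working directory are
made up):

#include "ndfs/util.h"

#include <stdlib.h>

/* Illustrative only: fs is assumed to be a connected struct native_fs whose
 * working directory is hdfs://mynamenode/user/alice. */
static void build_path_examples(struct native_fs *fs)
{
    char *path = NULL;
    struct hadoop_err *err;

    /* Empty path: resolves to the current working directory. */
    err = build_path(fs, "hdfs://mynamenode", &path);
    if (!err)
        free(path);   /* path was "/user/alice"; the caller frees it,
                         as ndfs_open_file() does */

    /* Trailing slash: resolves to the root directory. */
    err = build_path(fs, "hdfs://mynamenode/", &path);
    if (!err)
        free(path);   /* path was "/" */
}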
diff --git a/hadoop-native-core/src/main/native/ndfs/util.h b/hadoop-native-core/src/main/native/ndfs/util.h
new file mode 100644
index 0000000..4d6714a
--- /dev/null
+++ b/hadoop-native-core/src/main/native/ndfs/util.h
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_CORE_NDFS_UTIL_H
+#define HADOOP_CORE_NDFS_UTIL_H
+
+struct hadoop_err;
+struct hrpc_proxy;
+struct native_fs;
+
+/**
+ * Initialize a namenode proxy.
+ *
+ * @param fs            The native filesystem.
+ * @param proxy         (out param) The proxy to initialize.
+ */
+void ndfs_nn_proxy_init(struct native_fs *fs, struct hrpc_proxy *proxy);
+
+/**
+ * Construct a canonical path from a URI.
+ *
+ * @param fs        The filesystem.
+ * @param uri       The URI.
+ * @param out       (out param) the canonical path.
+ *
+ * @return          NULL on success; the error otherwise.
+ */
+struct hadoop_err *build_path(struct native_fs *fs, const char *uri_str,
+                                     char **out);
+
+#endif
+
+// vim: ts=4:sw=4:tw=79:et