<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<!-- Generated by the JDiff Javadoc doclet -->
<!-- (http://www.jdiff.org) -->
<!-- on Sun May 31 20:46:08 PDT 2009 -->
<api
xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
xsi:noNamespaceSchemaLocation='api.xsd'
name="hadoop-hdfs 0.20.0"
jdversion="1.0.9">
<!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/gkesavan/release-0.20.0/build/ivy/lib/Hadoop/jdiff/jdiff-1.0.9.jar:/home/gkesavan/release-0.20.0/build/ivy/lib/Hadoop/jdiff/xerces-1.4.4.jar -classpath /home/gkesavan/release-0.20.0/build/classes:/home/gkesavan/release-0.20.0/lib/commons-cli-2.0-SNAPSHOT.jar:/home/gkesavan/release-0.20.0/lib/hsqldb-1.8.0.10.jar:/home/gkesavan/release-0.20.0/lib/jsp-2.1/jsp-2.1.jar:/home/gkesavan/release-0.20.0/lib/jsp-2.1/jsp-api-2.1.jar:/home/gkesavan/release-0.20.0/lib/kfs-0.2.2.jar:/home/gkesavan/release-0.20.0/conf:/home/gkesavan/.ivy2/cache/commons-logging/commons-logging/jars/commons-logging-1.0.4.jar:/home/gkesavan/.ivy2/cache/log4j/log4j/jars/log4j-1.2.15.jar:/home/gkesavan/.ivy2/cache/commons-httpclient/commons-httpclient/jars/commons-httpclient-3.0.1.jar:/home/gkesavan/.ivy2/cache/commons-codec/commons-codec/jars/commons-codec-1.3.jar:/home/gkesavan/.ivy2/cache/xmlenc/xmlenc/jars/xmlenc-0.52.jar:/home/gkesavan/.ivy2/cache/net.java.dev.jets3t/jets3t/jars/jets3t-0.6.1.jar:/home/gkesavan/.ivy2/cache/commons-net/commons-net/jars/commons-net-1.4.1.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/servlet-api-2.5/jars/servlet-api-2.5-6.1.14.jar:/home/gkesavan/.ivy2/cache/oro/oro/jars/oro-2.0.8.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/jetty/jars/jetty-6.1.14.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/jetty-util/jars/jetty-util-6.1.14.jar:/home/gkesavan/.ivy2/cache/tomcat/jasper-runtime/jars/jasper-runtime-5.5.12.jar:/home/gkesavan/.ivy2/cache/tomcat/jasper-compiler/jars/jasper-compiler-5.5.12.jar:/home/gkesavan/.ivy2/cache/commons-el/commons-el/jars/commons-el-1.0.jar:/home/gkesavan/.ivy2/cache/junit/junit/jars/junit-3.8.1.jar:/home/gkesavan/.ivy2/cache/commons-logging/commons-logging-api/jars/commons-logging-api-1.0.4.jar:/home/gkesavan/.ivy2/cache/org.slf4j/slf4j-api/jars/slf4j-api-1.4.3.jar:/home/gkesavan/.ivy2/cache/org.eclipse.jdt/core/jars/core-3.1.1.jar:/home/gkesavan/.ivy2/cache/org.slf4j/slf4j-log4j12/jars/slf4j-log4j12-1.4.3.jar:/home/gkesavan/.ivy2/cache/jdiff/jdiff/jars/jdiff-1.0.9.jar:/home/gkesavan/.ivy2/cache/xerces/xerces/jars/xerces-1.4.4.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-launcher.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-resolver.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-starteam.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-netrexx.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-testutil.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jai.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-swing.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jmf.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-bcel.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jdepend.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jsch.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-bsf.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-antlr.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-weblogic.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-junit.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-log4j.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/xercesImpl.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-oro.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-trax.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-nodeps.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-commons-logging.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-regexp.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-s
tylebook.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-javamail.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-commons-net.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/xml-apis.jar:/home/gkesavan/tools/jdk1.6.0_07-32bit/lib/tools.jar -sourcepath /home/gkesavan/release-0.20.0/src/hdfs -apidir /home/gkesavan/release-0.20.0/lib/jdiff -apiname hadoop 0.20.1-dev -->
<package name="org.apache.hadoop.hdfs">
<!-- start class org.apache.hadoop.hdfs.ChecksumDistributedFileSystem -->
<class name="ChecksumDistributedFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="ChecksumDistributedFileSystem"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="ChecksumDistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="deprecated, no comment">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@deprecated]]>
</doc>
</constructor>
<method name="getRawCapacity" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Return the total raw capacity of the filesystem, disregarding
replication.]]>
</doc>
</method>
<method name="getRawUsed" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Return the total raw used space in the filesystem, disregarding
replication.]]>
</doc>
</method>
<method name="getDataNodeStats" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Return statistics for each datanode.]]>
</doc>
</method>
<method name="setSafeMode" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Enter, leave or get safe mode.
@see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
</doc>
</method>
<method name="refreshNodes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="finalizeUpgrade"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Finalize previously upgraded file system state.]]>
</doc>
</method>
<method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="metaSave"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="pathname" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="reportChecksumFailure" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
<param name="inPos" type="long"/>
<param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
<param name="sumsPos" type="long"/>
<doc>
<![CDATA[We need to find the blocks that didn't match. Likely only one
is corrupt but we will report both to the namenode. In the future,
we can consider figuring out exactly which block is corrupt.]]>
</doc>
</method>
<method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns the stat information about the file.]]>
</doc>
</method>
<doc>
<![CDATA[An implementation of ChecksumFileSystem over DistributedFileSystem.
Note that as of now (May 07), DistributedFileSystem natively checksums
all of its data. Using this class should not be necessary in most cases.
Currently provided mainly for backward compatibility and testing.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.ChecksumDistributedFileSystem -->
<!-- start class org.apache.hadoop.hdfs.DFSClient -->
<class name="DFSClient" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
<implements name="java.io.Closeable"/>
<constructor name="DFSClient" type="org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a new DFSClient connected to the default namenode.]]>
</doc>
</constructor>
<constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem.Statistics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a new DFSClient connected to the given namenode server.]]>
</doc>
</constructor>
<constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="nameNodeAddr" type="java.net.InetSocketAddress"/>
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="close"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Close the file system, abandoning all of the leases and files being
created and close connections to the namenode.]]>
</doc>
</method>
<method name="getDefaultBlockSize" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the default block size for this cluster
@return the default block size in bytes]]>
</doc>
</method>
<method name="getBlockSize" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="reportBadBlocks"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Report corrupt blocks that were discovered by the client.]]>
</doc>
</method>
<method name="getDefaultReplication" return="short"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getHints" return="java.lang.String[][]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="Use getBlockLocations instead
Get hints about the location of the indicated block(s).
getHints() returns a list of hostnames that store data for
a specific file region. It returns a set of hostnames for
every block within the indicated region.
This function is very useful when writing code that considers
data-placement when performing operations. For example, the
MapReduce system tries to schedule tasks on the same machines
as the data-block the task processes.">
<param name="src" type="java.lang.String"/>
<param name="start" type="long"/>
<param name="length" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@deprecated Use getBlockLocations instead
Get hints about the location of the indicated block(s).
getHints() returns a list of hostnames that store data for
a specific file region. It returns a set of hostnames for
every block within the indicated region.
This function is very useful when writing code that considers
data-placement when performing operations. For example, the
MapReduce system tries to schedule tasks on the same machines
as the data-block the task processes.]]>
</doc>
</method>
<method name="getBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="start" type="long"/>
<param name="length" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get block location info about a file.
getBlockLocations() returns a list of hostnames that store
data for a specific file region. It returns a set of hostnames
for every block within the indicated region.
This function is very useful when writing code that considers
data-placement when performing operations. For example, the
MapReduce system tries to schedule tasks on the same machines
as the data-block the task processes.]]>
</doc>
</method>
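<!-- Illustrative usage sketch: the doc above describes how block location info supports
data-placement-aware code. A minimal Java example using the public
FileSystem.getFileBlockLocations call, which on HDFS is served by this client method;
the file path is hypothetical and the cluster is taken from the default Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();        // reads core-site.xml / hdfs-site.xml
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/user/example/data.txt");  // hypothetical path
    FileStatus stat = fs.getFileStatus(file);
    // One BlockLocation per block that overlaps the requested byte range.
    BlockLocation[] blocks = fs.getFileBlockLocations(stat, 0, stat.getLen());
    for (BlockLocation b : blocks) {
      System.out.println("offset=" + b.getOffset()
          + " length=" + b.getLength()
          + " hosts=" + java.util.Arrays.toString(b.getHosts()));
    }
    fs.close();
  }
}
-->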
<method name="open" return="org.apache.hadoop.hdfs.DFSClient.DFSInputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="create" return="java.io.OutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="overwrite" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a new dfs file and return an output stream for writing into it.
@param src stream name
@param overwrite do not check for file existence if true
@return output stream
@throws IOException]]>
</doc>
</method>
<method name="create" return="java.io.OutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="overwrite" type="boolean"/>
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a new dfs file and return an output stream for writing into it
with write-progress reporting.
@param src stream name
@param overwrite do not check for file existence if true
@return output stream
@throws IOException]]>
</doc>
</method>
<method name="create" return="java.io.OutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="overwrite" type="boolean"/>
<param name="replication" type="short"/>
<param name="blockSize" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a new dfs file with the specified block replication
and return an output stream for writing into the file.
@param src stream name
@param overwrite do not check for file existence if true
@param replication block replication
@return output stream
@throws IOException]]>
</doc>
</method>
<method name="create" return="java.io.OutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="overwrite" type="boolean"/>
<param name="replication" type="short"/>
<param name="blockSize" type="long"/>
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a new dfs file with the specified block replication
with write-progress reporting and return an output stream for writing
into the file.
@param src stream name
@param overwrite do not check for file existence if true
@param replication block replication
@return output stream
@throws IOException]]>
</doc>
</method>
<method name="create" return="java.io.OutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="overwrite" type="boolean"/>
<param name="replication" type="short"/>
<param name="blockSize" type="long"/>
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
<param name="buffersize" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Call
{@link #create(String,FsPermission,boolean,short,long,Progressable,int)}
with default permission.
@see FsPermission#getDefault()]]>
</doc>
</method>
<method name="create" return="java.io.OutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
<param name="overwrite" type="boolean"/>
<param name="replication" type="short"/>
<param name="blockSize" type="long"/>
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
<param name="buffersize" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a new dfs file with the specified block replication
with write-progress reporting and return an output stream for writing
into the file.
@param src stream name
@param permission The permission of the directory being created.
If permission == null, use {@link FsPermission#getDefault()}.
@param overwrite do not check for file existence if true
@param replication block replication
@return output stream
@throws IOException
@see ClientProtocol#create(String, FsPermission, String, boolean, short, long)]]>
</doc>
</method>
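<!-- Illustrative usage sketch: creating a file with an explicit permission, replication
factor and block size through the FileSystem layer, which delegates to this client call
on HDFS. The path, permission and sizes below are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CreateWithPermissionExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/user/example/output.bin");    // hypothetical path
    FsPermission perm = new FsPermission((short) 0644);  // octal 644
    // overwrite=true, bufferSize=4096, replication=3, blockSize=64 MB, no progress callback
    FSDataOutputStream out =
        fs.create(file, perm, true, 4096, (short) 3, 64L * 1024 * 1024, null);
    out.write("hello hdfs".getBytes("UTF-8"));
    out.close();
    fs.close();
  }
}
-->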
<method name="setReplication" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="replication" type="short"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set replication for an existing file.
@see ClientProtocol#setReplication(String, short)
@param replication
@throws IOException
@return true is successful or false if file does not exist]]>
</doc>
</method>
<method name="rename" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="dst" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Rename file or directory.
See {@link ClientProtocol#rename(String, String)}.]]>
</doc>
</method>
<method name="delete" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Delete file or directory.
See {@link ClientProtocol#delete(String)}.]]>
</doc>
</method>
<method name="delete" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="recursive" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Delete a file or directory.
Deletes the contents of the directory if it is non-empty and recursive is
set to true.]]>
</doc>
</method>
<method name="exists" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Implemented using getFileInfo(src)]]>
</doc>
</method>
<method name="isDirectory" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="Use getFileStatus() instead">
<param name="src" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@deprecated Use getFileStatus() instead]]>
</doc>
</method>
<method name="listPaths" return="org.apache.hadoop.fs.FileStatus[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getFileInfo" return="org.apache.hadoop.fs.FileStatus"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="namenode" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
<param name="socketFactory" type="javax.net.SocketFactory"/>
<param name="socketTimeout" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the checksum of a file.
@param src The file path
@return The checksum]]>
</doc>
</method>
<method name="setPermission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set permissions to a file or directory.
@param src path name.
@param permission
@throws <code>FileNotFoundException</code> if the file does not exist.]]>
</doc>
</method>
<method name="setOwner"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="username" type="java.lang.String"/>
<param name="groupname" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set file or directory owner.
@param src path name.
@param username user id.
@param groupname user group.
@throws <code>FileNotFoundException</code> if the file does not exist.]]>
</doc>
</method>
<method name="getDiskStatus" return="org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="totalRawCapacity" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="totalRawUsed" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getMissingBlocksCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns count of blocks with no good replicas left. Normally should be
zero.
@throws IOException]]>
</doc>
</method>
<method name="getUnderReplicatedBlocksCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns count of blocks with one or more replicas missing.
@throws IOException]]>
</doc>
</method>
<method name="getCorruptBlocksCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns count of blocks with at least one replica marked corrupt.
@throws IOException]]>
</doc>
</method>
<method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="setSafeMode" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Enter, leave or get safe mode.
See {@link ClientProtocol#setSafeMode(FSConstants.SafeModeAction)}
for more details.
@see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
</doc>
</method>
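<!-- Illustrative usage sketch: querying and leaving safe mode through
DistributedFileSystem.setSafeMode, which wraps this client call. Assumes fs.default.name
points at an HDFS URI so the FileSystem factory returns a DistributedFileSystem.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.FSConstants;

public class SafeModeExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    // SAFEMODE_GET only reports the current state; SAFEMODE_ENTER / SAFEMODE_LEAVE change it.
    boolean inSafeMode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
    System.out.println("Namenode in safe mode: " + inSafeMode);
    if (inSafeMode) {
      dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
    }
    dfs.close();
  }
}
-->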
<method name="refreshNodes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Refresh the hosts and exclude files. (Rereads them.)
See {@link ClientProtocol#refreshNodes()}
for more details.
@see ClientProtocol#refreshNodes()]]>
</doc>
</method>
<method name="metaSave"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="pathname" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Dumps DFS data structures into specified file.
See {@link ClientProtocol#metaSave(String)}
for more details.
@see ClientProtocol#metaSave(String)]]>
</doc>
</method>
<method name="finalizeUpgrade"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@see ClientProtocol#finalizeUpgrade()]]>
</doc>
</method>
<method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction)]]>
</doc>
</method>
<method name="mkdirs" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="mkdirs" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a directory (or hierarchy of directories) with the given
name and permission.
@param src The path of the directory being created
@param permission The permission of the directory being created.
If permission == null, use {@link FsPermission#getDefault()}.
@return True if the operation success.
@see ClientProtocol#mkdirs(String, FsPermission)]]>
</doc>
</method>
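<!-- Illustrative usage sketch: creating a directory hierarchy with an explicit permission
via FileSystem.mkdirs(Path, FsPermission), which on HDFS is served by this client method.
The path and permission are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirsExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Missing parent directories are created as well; permission is octal 755.
    boolean ok = fs.mkdirs(new Path("/user/example/logs/2009"),
        new FsPermission((short) 0755));
    System.out.println("mkdirs succeeded: " + ok);
    fs.close();
  }
}
-->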
<method name="setTimes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="mtime" type="long"/>
<param name="atime" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the modification and access time of a file.
@throws FileNotFoundException if the path is not a file]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<field name="LOG" type="org.apache.commons.logging.Log"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="MAX_BLOCK_ACQUIRE_FAILURES" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="namenode" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[DFSClient can connect to a Hadoop Filesystem and
perform basic file tasks. It uses the ClientProtocol
to communicate with a NameNode daemon, and connects
directly to DataNodes to read/write block data.
Hadoop DFS users should obtain an instance of
DistributedFileSystem, which uses DFSClient to handle
filesystem tasks.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSClient -->
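<!-- Illustrative usage sketch: as the class doc above notes, end-user code should obtain a
DistributedFileSystem (via the FileSystem factory) rather than construct a DFSClient
directly. The file path below is hypothetical.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadThroughFileSystemExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Returns a DistributedFileSystem when fs.default.name is an hdfs:// URI;
    // the DFSClient is managed internally.
    FileSystem fs = FileSystem.get(conf);
    FSDataInputStream in = fs.open(new Path("/user/example/readme.txt"));
    BufferedReader reader = new BufferedReader(new InputStreamReader(in));
    String line;
    while ((line = reader.readLine()) != null) {
      System.out.println(line);
    }
    reader.close();
    fs.close();
  }
}
-->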
<!-- start class org.apache.hadoop.hdfs.DFSClient.BlockReader -->
<class name="DFSClient.BlockReader" extends="org.apache.hadoop.fs.FSInputChecker"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="read" return="int"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="buf" type="byte[]"/>
<param name="off" type="int"/>
<param name="len" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="skip" return="long"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="n" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="read" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="seekToNewSource" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="targetPos" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="seek"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="pos" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getChunkPosition" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="pos" type="long"/>
</method>
<method name="readChunk" return="int"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="pos" type="long"/>
<param name="buf" type="byte[]"/>
<param name="offset" type="int"/>
<param name="len" type="int"/>
<param name="checksumBuf" type="byte[]"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="newBlockReader" return="org.apache.hadoop.hdfs.DFSClient.BlockReader"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="sock" type="java.net.Socket"/>
<param name="file" type="java.lang.String"/>
<param name="blockId" type="long"/>
<param name="genStamp" type="long"/>
<param name="startOffset" type="long"/>
<param name="len" type="long"/>
<param name="bufferSize" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="newBlockReader" return="org.apache.hadoop.hdfs.DFSClient.BlockReader"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="sock" type="java.net.Socket"/>
<param name="file" type="java.lang.String"/>
<param name="blockId" type="long"/>
<param name="genStamp" type="long"/>
<param name="startOffset" type="long"/>
<param name="len" type="long"/>
<param name="bufferSize" type="int"/>
<param name="verifyChecksum" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a BlockReader that reads the given block over the given socket, optionally verifying checksums.]]>
</doc>
</method>
<method name="newBlockReader" return="org.apache.hadoop.hdfs.DFSClient.BlockReader"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="sock" type="java.net.Socket"/>
<param name="file" type="java.lang.String"/>
<param name="blockId" type="long"/>
<param name="genStamp" type="long"/>
<param name="startOffset" type="long"/>
<param name="len" type="long"/>
<param name="bufferSize" type="int"/>
<param name="verifyChecksum" type="boolean"/>
<param name="clientName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="close"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="readAll" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="buf" type="byte[]"/>
<param name="offset" type="int"/>
<param name="len" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Similar to readFully(), but only reads as much as possible,
and allows use of the protected readFully().]]>
</doc>
</method>
<doc>
<![CDATA[This is a wrapper around a connection to a datanode
and understands checksum, offset etc.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSClient.BlockReader -->
<!-- start class org.apache.hadoop.hdfs.DFSUtil -->
<class name="DFSUtil" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DFSUtil"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="isValidName" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<doc>
<![CDATA[Whether the pathname is valid. Currently prohibits relative paths,
and names which contain a ":" or "/"]]>
</doc>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSUtil -->
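<!-- Illustrative usage sketch: exercising the static DFSUtil.isValidName check described
above. Expected results follow from the doc: absolute paths pass, relative paths and
components containing ':' are rejected.

import org.apache.hadoop.hdfs.DFSUtil;

public class ValidNameExample {
  public static void main(String[] args) {
    System.out.println(DFSUtil.isValidName("/user/example/file.txt")); // true: absolute path
    System.out.println(DFSUtil.isValidName("relative/path"));          // false: not absolute
    System.out.println(DFSUtil.isValidName("/user/bad:name"));         // false: contains ':'
  }
}
-->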
<!-- start class org.apache.hadoop.hdfs.DistributedFileSystem -->
<class name="DistributedFileSystem" extends="org.apache.hadoop.fs.FileSystem"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DistributedFileSystem"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="DistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="deprecated, no comment">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@deprecated]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="deprecated, no comment">
<doc>
<![CDATA[@deprecated]]>
</doc>
</method>
<method name="getUri" return="java.net.URI"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="initialize"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="uri" type="java.net.URI"/>
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="checkPath"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<doc>
<![CDATA[Permit paths which explicitly specify the default port.]]>
</doc>
</method>
<method name="makeQualified" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<doc>
<![CDATA[Normalize paths that explicitly specify the default port.]]>
</doc>
</method>
<method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDefaultBlockSize" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDefaultReplication" return="short"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setWorkingDirectory"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="dir" type="org.apache.hadoop.fs.Path"/>
</method>
<method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="file" type="org.apache.hadoop.fs.FileStatus"/>
<param name="start" type="long"/>
<param name="len" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="setVerifyChecksum"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="verifyChecksum" type="boolean"/>
</method>
<method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="bufferSize" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="bufferSize" type="int"/>
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[This optional operation is not yet supported.]]>
</doc>
</method>
<method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
<param name="overwrite" type="boolean"/>
<param name="bufferSize" type="int"/>
<param name="replication" type="short"/>
<param name="blockSize" type="long"/>
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="setReplication" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="replication" type="short"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="rename" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="dst" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Rename files/dirs]]>
</doc>
</method>
<method name="delete" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get rid of Path f, whether a true file or dir.]]>
</doc>
</method>
<method name="delete" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="recursive" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Requires the recursive flag to be set to true in order to delete a
non-empty directory.]]>
</doc>
</method>
<method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<method name="setQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="namespaceQuota" type="long"/>
<param name="diskspaceQuota" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set a directory's quotas
@see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)]]>
</doc>
</method>
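<!-- Illustrative usage sketch: setting directory quotas with the signature above, assuming
the default FileSystem is HDFS so the cast succeeds. The directory and limits are
hypothetical; the namespace quota bounds the number of names under the directory and the
diskspace quota bounds consumed bytes (replication included).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetQuotaExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    Path dir = new Path("/user/example/project");          // hypothetical directory
    dfs.setQuota(dir, 100000L, 10L * 1024 * 1024 * 1024);  // 100k names, 10 GB
    dfs.close();
  }
}
-->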
<method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="mkdirs" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getClient" return="org.apache.hadoop.hdfs.DFSClient"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDiskStatus" return="org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Return the disk usage of the filesystem, including total capacity,
used space, and remaining space]]>
</doc>
</method>
<method name="getRawCapacity" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Return the total raw capacity of the filesystem, disregarding
replication.]]>
</doc>
</method>
<method name="getRawUsed" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Return the total raw used space in the filesystem, disregarding
replication.]]>
</doc>
</method>
<method name="getMissingBlocksCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns count of blocks with no good replicas left. Normally should be
zero.
@throws IOException]]>
</doc>
</method>
<method name="getUnderReplicatedBlocksCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns count of blocks with one or more replicas missing.
@throws IOException]]>
</doc>
</method>
<method name="getCorruptBlocksCount" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns count of blocks with at least one replica marked corrupt.
@throws IOException]]>
</doc>
</method>
<method name="getDataNodeStats" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Return statistics for each datanode.]]>
</doc>
</method>
<method name="setSafeMode" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Enter, leave or get safe mode.
@see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
FSConstants.SafeModeAction)]]>
</doc>
</method>
<method name="saveNamespace"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Save namespace image.
@see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
</doc>
</method>
<method name="refreshNodes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Refreshes the list of hosts and excluded hosts from the configured
files.]]>
</doc>
</method>
<method name="finalizeUpgrade"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Finalize previously upgraded file system state.
@throws IOException]]>
</doc>
</method>
<method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="metaSave"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="pathname" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="reportChecksumFailure" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
<param name="inPos" type="long"/>
<param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
<param name="sumsPos" type="long"/>
<doc>
<![CDATA[We need to find the blocks that didn't match. Likely only one
is corrupt but we will report both to the namenode. In the future,
we can consider figuring out exactly which block is corrupt.]]>
</doc>
</method>
<method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns the stat information about the file.
@throws FileNotFoundException if the file does not exist.]]>
</doc>
</method>
<method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<method name="setPermission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<method name="setOwner"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="username" type="java.lang.String"/>
<param name="groupname" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<method name="setTimes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="p" type="org.apache.hadoop.fs.Path"/>
<param name="mtime" type="long"/>
<param name="atime" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<doc>
<![CDATA[Implementation of the abstract FileSystem for the DFS system.
This object is the way end-user code interacts with a Hadoop
DistributedFileSystem.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DistributedFileSystem -->
<!-- start class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
<class name="DistributedFileSystem.DiskStatus" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DistributedFileSystem.DiskStatus" type="long, long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getCapacity" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDfsUsed" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getRemaining" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
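<!-- Illustrative usage sketch: reading cluster disk usage through
DistributedFileSystem.getDiskStatus and the DiskStatus accessors listed above, assuming
the default FileSystem is HDFS so the cast succeeds.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus;

public class DiskStatusExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    DiskStatus ds = dfs.getDiskStatus();
    System.out.println("capacity  = " + ds.getCapacity());   // bytes
    System.out.println("dfs used  = " + ds.getDfsUsed());    // bytes
    System.out.println("remaining = " + ds.getRemaining());  // bytes
    dfs.close();
  }
}
-->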
<!-- start class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
<class name="HDFSPolicyProvider" extends="org.apache.hadoop.security.authorize.PolicyProvider"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HDFSPolicyProvider"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getServices" return="org.apache.hadoop.security.authorize.Service[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[{@link PolicyProvider} for HDFS protocols.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
<!-- start class org.apache.hadoop.hdfs.HftpFileSystem -->
<class name="HftpFileSystem" extends="org.apache.hadoop.fs.FileSystem"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HftpFileSystem"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="initialize"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.net.URI"/>
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="pickOneAddress" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="hostname" type="java.lang.String"/>
<exception name="UnknownHostException" type="java.net.UnknownHostException"/>
<doc>
<![CDATA[Randomly pick one of the available IP addresses of a given hostname.]]>
</doc>
</method>
<method name="getUri" return="java.net.URI"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="openConnection" return="java.net.HttpURLConnection"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
<param name="query" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Open an HTTP connection to the namenode to read file data and metadata.
@param path The path component of the URL
@param query The query component of the URL]]>
</doc>
</method>
<method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="buffersize" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setWorkingDirectory"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
</method>
<method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="bufferSize" type="int"/>
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[This optional operation is not yet supported.]]>
</doc>
</method>
<method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
<param name="overwrite" type="boolean"/>
<param name="bufferSize" type="int"/>
<param name="replication" type="short"/>
<param name="blockSize" type="long"/>
<param name="progress" type="org.apache.hadoop.util.Progressable"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="rename" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="dst" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="delete" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="delete" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="recursive" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="mkdirs" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="org.apache.hadoop.fs.Path"/>
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<field name="nnAddr" type="java.net.InetSocketAddress"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<field name="ugi" type="org.apache.hadoop.security.UserGroupInformation"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<field name="ran" type="java.util.Random"
transient="false" volatile="false"
static="false" final="true" visibility="protected"
deprecated="not deprecated">
</field>
<field name="df" type="java.text.SimpleDateFormat"
transient="false" volatile="false"
static="true" final="true" visibility="protected"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[An implementation of a protocol for accessing filesystems over HTTP.
The following implementation provides a limited, read-only interface
to a filesystem over HTTP.
@see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
@see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.HftpFileSystem -->
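<!-- A minimal read-only usage sketch for HftpFileSystem. The hftp scheme is served by the
     namenode's HTTP server; the host and port below (namenode:50070) stand in for a real
     dfs.http.address, and the paths are placeholders.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FSDataInputStream;
     import org.apache.hadoop.fs.FileStatus;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;

     public class HftpReadExample {
       public static void main(String[] args) throws Exception {
         Configuration conf = new Configuration();
         // Placeholder namenode HTTP address and paths.
         FileSystem fs = FileSystem.get(URI.create("hftp://namenode:50070/"), conf);
         // List a directory through the read-only HTTP interface.
         for (FileStatus st : fs.listStatus(new Path("/user"))) {
           System.out.println(st.getPath() + " " + st.getLen());
         }
         // Read the first bytes of a file; writes are not supported over hftp.
         FSDataInputStream in = fs.open(new Path("/user/example.txt"));
         byte[] buf = new byte[128];
         int n = in.read(buf);
         System.out.println("read " + n + " bytes");
         in.close();
         fs.close();
       }
     }
-->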
<!-- start class org.apache.hadoop.hdfs.HsftpFileSystem -->
<class name="HsftpFileSystem" extends="org.apache.hadoop.hdfs.HftpFileSystem"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HsftpFileSystem"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="initialize"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.net.URI"/>
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="openConnection" return="java.net.HttpURLConnection"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
<param name="query" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="getUri" return="java.net.URI"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[An implementation of a protocol for accessing filesystems over HTTPS.
The following implementation provides a limited, read-only interface
to a filesystem over HTTPS.
@see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
@see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.HsftpFileSystem -->
<!-- start class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
<class name="HsftpFileSystem.DummyHostnameVerifier" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="protected"
deprecated="not deprecated">
<implements name="javax.net.ssl.HostnameVerifier"/>
<constructor name="HsftpFileSystem.DummyHostnameVerifier"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</constructor>
<method name="verify" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="hostname" type="java.lang.String"/>
<param name="session" type="javax.net.ssl.SSLSession"/>
</method>
<doc>
<![CDATA[Dummy hostname verifier that is used to bypass hostname checking]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
<doc>
<![CDATA[<p>A distributed implementation of {@link
org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
Google's <a href="http://labs.google.com/papers/gfs.html">GFS</a>.</p>
<p>The most important difference is that unlike GFS, Hadoop DFS files
have strictly one writer at any one time. Bytes are always appended
to the end of the writer's stream. There is no notion of "record appends"
or "mutations" that are then checked or reordered. Writers simply emit
a byte stream. That byte stream is guaranteed to be stored in the
order written.</p>]]>
</doc>
</package>
<package name="org.apache.hadoop.hdfs.protocol">
<!-- start class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
<class name="AlreadyBeingCreatedException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="AlreadyBeingCreatedException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[The exception thrown when a client asks to create a file that is
already being created but has not yet been closed.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
<!-- start class org.apache.hadoop.hdfs.protocol.Block -->
<class name="Block" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.io.Writable"/>
<implements name="java.lang.Comparable"/>
<constructor name="Block"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="Block" type="long, long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="Block" type="long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="Block" type="org.apache.hadoop.hdfs.protocol.Block"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="Block" type="java.io.File, long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Find the blockid from the given filename]]>
</doc>
</constructor>
<method name="isBlockFilename" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="f" type="java.io.File"/>
</method>
<method name="set"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blkid" type="long"/>
<param name="len" type="long"/>
<param name="genStamp" type="long"/>
</method>
<method name="getBlockId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setBlockId"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="bid" type="long"/>
</method>
<method name="getBlockName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getNumBytes" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setNumBytes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="len" type="long"/>
</method>
<method name="getGenerationStamp" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setGenerationStamp"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="stamp" type="long"/>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="write"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.DataOutput"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="readFields"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.DataInput"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="compareTo" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<field name="GRANDFATHER_GENERATION_STAMP" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[A Block is a Hadoop FS primitive, identified by a
long.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.Block -->
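<!-- A small sketch of Block as a Writable, using only the constructor and methods listed
     above; the id, length and generation-stamp values are arbitrary example numbers.

     import java.io.ByteArrayInputStream;
     import java.io.ByteArrayOutputStream;
     import java.io.DataInputStream;
     import java.io.DataOutputStream;
     import org.apache.hadoop.hdfs.protocol.Block;

     public class BlockWritableExample {
       public static void main(String[] args) throws Exception {
         // Arbitrary example values: blockId, numBytes, generationStamp.
         Block original = new Block(1234L, 64L * 1024 * 1024, 1001L);

         // Serialize with the Writable contract.
         ByteArrayOutputStream bytes = new ByteArrayOutputStream();
         DataOutputStream out = new DataOutputStream(bytes);
         original.write(out);
         out.flush();

         // Deserialize into a fresh instance.
         Block copy = new Block();
         copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

         System.out.println(copy.getBlockName() + " len=" + copy.getNumBytes()
             + " genStamp=" + copy.getGenerationStamp());
         System.out.println("equal: " + original.equals(copy));
       }
     }
-->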
<!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
<class name="BlockListAsLongs" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BlockListAsLongs" type="long[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Constructor
@param iBlockList - the long[] from which this BlockListAsLongs is created]]>
</doc>
</constructor>
<method name="convertToArrayLongs" return="long[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="blockArray" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
<doc>
<![CDATA[Convert a Block[] to a long[]
@param blockArray - the input Block[]
@return the resulting long[]]]>
</doc>
</method>
<method name="getNumberOfBlocks" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The number of blocks
@return - the number of blocks]]>
</doc>
</method>
<method name="getBlockId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="index" type="int"/>
<doc>
<![CDATA[The block-id of the index-th block
@param index - the block whose block-id is desired
@return the block-id]]>
</doc>
</method>
<method name="getBlockLen" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="index" type="int"/>
<doc>
<![CDATA[The block-len of the index-th block
@param index - the block whose block-len is desired
@return - the block-len]]>
</doc>
</method>
<method name="getBlockGenStamp" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="index" type="int"/>
<doc>
<![CDATA[The generation stamp of the index-th block
@param index - the block whose generation stamp is desired
@return - the generation stamp]]>
</doc>
</method>
<doc>
<![CDATA[This class provides an interface for accessing a list of blocks that
has been implemented as a long[].
It is useful for block reports: rather than sending a block report
as a Block[], we can send it as a long[].]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
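<!-- A sketch showing how a Block[] can be packed into a long[] and read back through
     BlockListAsLongs, using only the constructor and methods listed above; the block
     values are arbitrary.

     import org.apache.hadoop.hdfs.protocol.Block;
     import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

     public class BlockReportExample {
       public static void main(String[] args) {
         Block[] report = new Block[] {
           new Block(1L, 1024L, 1001L),
           new Block(2L, 2048L, 1002L)
         };

         // Pack the Block[] into the compact long[] form used for block reports.
         long[] packed = BlockListAsLongs.convertToArrayLongs(report);

         // Read the report back through the accessor interface.
         BlockListAsLongs list = new BlockListAsLongs(packed);
         for (int i = 0; i < list.getNumberOfBlocks(); i++) {
           System.out.println("block " + list.getBlockId(i)
               + " len=" + list.getBlockLen(i)
               + " genStamp=" + list.getBlockGenStamp(i));
         }
       }
     }
-->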
<!-- start interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
<interface name="ClientDatanodeProtocol" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
<method name="recoverBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
<param name="keepLength" type="boolean"/>
<param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Start generation-stamp recovery for the specified block
@param block the specified block
@param keepLength keep the block length
@param targets the list of possible locations of the specified block
@return the located block, with a new generation stamp, if recovery was
successful and the generation stamp was updated as part of the recovery;
null if the block had no data and was deleted.
@throws IOException]]>
</doc>
</method>
<field name="LOG" type="org.apache.commons.logging.Log"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="versionID" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[3: add keepLength parameter.]]>
</doc>
</field>
<doc>
<![CDATA[A client-datanode protocol for block recovery]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
<!-- start interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
<interface name="ClientProtocol" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
<method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="offset" type="long"/>
<param name="length" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get locations of the blocks of the specified file within the specified range.
<p>
Returns {@link LocatedBlocks}, which contains the
file length and the blocks with their locations.
DataNode locations for each block are sorted by
their proximity to the client's address.
<p>
The client will then have to contact
one of the indicated DataNodes to obtain the actual data.
@param src file name
@param offset range start offset
@param length range length
@return file length and array of blocks with their locations
@throws IOException]]>
</doc>
</method>
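<!-- A hedged sketch of how a client might interpret the result of getBlockLocations().
     The 'namenode' variable is assumed to be an already obtained ClientProtocol proxy
     (normally DFSClient manages this internally); the file name and range are placeholders,
     and the LocatedBlocks/LocatedBlock accessors used here are assumptions drawn from the
     rest of this API description.

     import org.apache.hadoop.hdfs.protocol.ClientProtocol;
     import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
     import org.apache.hadoop.hdfs.protocol.LocatedBlock;
     import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

     public class BlockLocationsSketch {
       static void printLocations(ClientProtocol namenode) throws Exception {
         // Placeholder path and range: the blocks covering the first 128 MB of the file.
         LocatedBlocks located =
             namenode.getBlockLocations("/user/example.txt", 0L, 128L * 1024 * 1024);
         for (LocatedBlock lb : located.getLocatedBlocks()) {
           System.out.print(lb.getBlock().getBlockName() + " @");
           // Locations are ordered by proximity to the client; read from the first one.
           for (DatanodeInfo dn : lb.getLocations()) {
             System.out.print(" " + dn.getName());
           }
           System.out.println();
         }
       }
     }
-->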
<method name="create"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
<param name="clientName" type="java.lang.String"/>
<param name="overwrite" type="boolean"/>
<param name="replication" type="short"/>
<param name="blockSize" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a new file entry in the namespace.
<p>
This will create an empty file specified by the source path.
The path should reflect a full path originated at the root.
The name-node does not have a notion of "current" directory for a client.
<p>
Once created, the file is visible and available for read to other clients.
However, other clients cannot {@link #delete(String)}, re-create or
{@link #rename(String, String)} it until the file is completed
or abandoned, whether explicitly or as a result of lease expiration.
<p>
Blocks have a maximum size. Clients that intend to
create multi-block files must also use {@link #addBlock(String, String)}.
@param src path of the file being created.
@param masked masked permission.
@param clientName name of the current client.
@param overwrite indicates whether the file should be
overwritten if it already exists.
@param replication block replication factor.
@param blockSize maximum block size.
@throws AccessControlException if permission to create the file is
denied by the system. As usual, on the client side the exception will
be wrapped in a {@link org.apache.hadoop.ipc.RemoteException}.
@throws QuotaExceededException if the file creation violates
any quota restriction
@throws IOException if other errors occur.]]>
</doc>
</method>
<method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="clientName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Append to the end of the file.
Appending is allowed only if the server is configured with the
parameter dfs.support.append set to true; otherwise an IOException
is thrown.
@param src path of the file being appended to.
@param clientName name of the current client.
@return information about the last partial block if any.
@throws AccessControlException if permission to append to the file is
denied by the system. As usual, on the client side the exception will
be wrapped in a {@link org.apache.hadoop.ipc.RemoteException}.
@throws IOException if other errors occur.]]>
</doc>
</method>
<method name="setReplication" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="replication" type="short"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set replication for an existing file.
<p>
The NameNode sets replication to the new value and returns.
The actual block replication is not expected to be performed during
this method call. The blocks will be populated or removed in the
background as the result of the routine block maintenance procedures.
@param src file name
@param replication new replication
@throws IOException
@return true if successful;
false if file does not exist or is a directory]]>
</doc>
</method>
<method name="setPermission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set permissions for an existing file/directory.]]>
</doc>
</method>
<method name="setOwner"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="username" type="java.lang.String"/>
<param name="groupname" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set owner of a path (i.e. a file or a directory).
The parameters username and groupname cannot both be null.
@param src
@param username If it is null, the original username remains unchanged.
@param groupname If it is null, the original groupname remains unchanged.]]>
</doc>
</method>
<method name="abandonBlock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
<param name="src" type="java.lang.String"/>
<param name="holder" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[The client can give up on a block by calling abandonBlock().
The client can then
either obtain a new block, or complete or abandon the file.
Any partial writes to the block will be discarded.]]>
</doc>
</method>
<method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="clientName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[A client that wants to write an additional block to the
indicated filename (which must currently be open for writing)
should call addBlock().
addBlock() allocates a new block and chooses the datanodes on which
the block data should be replicated.
@return LocatedBlock with the allocated block information.]]>
</doc>
</method>
<method name="complete" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="clientName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[The client is done writing data to the given filename, and would
like to complete it.
The function returns whether the file has been closed successfully.
If the function returns false, the caller should try again.
A call to complete() will not return true until all the file's
blocks have been replicated the minimum number of times. Thus,
DataNode failures may cause a client to call complete() several
times before succeeding.]]>
</doc>
</method>
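<!-- A hedged sketch of the create / addBlock / complete sequence described above, at the
     ClientProtocol level. The 'namenode' proxy, client name and path are assumptions; the
     actual streaming of block data to the datanodes returned in each LocatedBlock happens
     outside ClientProtocol and is elided here.

     import org.apache.hadoop.fs.permission.FsPermission;
     import org.apache.hadoop.hdfs.protocol.ClientProtocol;
     import org.apache.hadoop.hdfs.protocol.LocatedBlock;

     public class WriteFlowSketch {
       static void writeOneBlockFile(ClientProtocol namenode) throws Exception {
         String src = "/user/example/output.dat";  // placeholder path
         String clientName = "example-client";     // placeholder client name

         // 1. Create the (empty) file entry in the namespace.
         namenode.create(src, FsPermission.getDefault(), clientName,
             true /* overwrite */, (short) 3, 64L * 1024 * 1024);

         // 2. Ask for a block and the datanodes to replicate it to,
         //    then stream the data to those datanodes (not shown).
         LocatedBlock block = namenode.addBlock(src, clientName);

         // 3. Close the file; retry until the namenode reports enough replicas.
         while (!namenode.complete(src, clientName)) {
           Thread.sleep(400);
         }
       }
     }
-->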
<method name="reportBadBlocks"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[The client wants to report corrupted blocks (blocks with specified
locations on datanodes).
@param blocks Array of located blocks to report]]>
</doc>
</method>
<method name="rename" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="dst" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Rename an item in the file system namespace.
@param src existing file or directory name.
@param dst new name.
@return true if successful, or false if the old name does not exist
or if the new name already belongs to the namespace.
@throws IOException if the new name is invalid.
@throws QuotaExceededException if the rename would violate
any quota restriction]]>
</doc>
</method>
<method name="delete" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Delete the given file or directory from the file system.
<p>
Any blocks belonging to the deleted files will be garbage-collected.
@param src existing name.
@return true only if the existing file or directory was actually removed
from the file system.]]>
</doc>
</method>
<method name="delete" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="recursive" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Delete the given file or directory from the file system.
<p>
Same as {@link #delete(String)} but provides a way to avoid accidentally
deleting non-empty directories programmatically.
@param src existing name
@param recursive if true deletes a non empty directory recursively,
else throws an exception.
@return true only if the existing file or directory was actually removed
from the file system.]]>
</doc>
</method>
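<!-- A brief sketch of the recursive versus non-recursive delete contract, shown through the
     FileSystem-level delete(Path, boolean) that forwards to this protocol call. The
     filesystem URI and paths are placeholders.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;

     public class DeleteSketch {
       public static void main(String[] args) throws Exception {
         // Placeholder URI and path.
         FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:9000/"), new Configuration());
         Path dir = new Path("/tmp/scratch");

         // Non-recursive delete: throws an IOException if the directory is not empty.
         try {
           fs.delete(dir, false);
         } catch (java.io.IOException e) {
           System.out.println("directory not empty: " + e.getMessage());
         }

         // Recursive delete: removes the directory and everything under it.
         boolean removed = fs.delete(dir, true);
         System.out.println("removed: " + removed);
         fs.close();
       }
     }
-->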
<method name="mkdirs" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a directory (or hierarchy of directories) with the given
name and permission.
@param src The path of the directory being created
@param masked The masked permission of the directory being created
@return true if the operation succeeds.
@throws {@link AccessControlException} if permission to create the directory is
denied by the system. As usual, on the client side the exception will
be wrapped in a {@link org.apache.hadoop.ipc.RemoteException}.
@throws QuotaExceededException if the operation would violate
any quota restriction.]]>
</doc>
</method>
<method name="getListing" return="org.apache.hadoop.fs.FileStatus[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get a listing of the indicated directory]]>
</doc>
</method>
<method name="renewLease"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="clientName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Client programs can cause stateful changes in the NameNode
that affect other clients. A client may obtain a file and
neither abandon nor complete it. A client might hold a series
of locks that prevent other clients from proceeding.
Clearly, it would be bad if a client held a bunch of locks
that it never gave up. This can happen easily if the client
dies unexpectedly.
<p>
So, the NameNode will revoke the locks and live file-creates
for clients that it thinks have died. A client tells the
NameNode that it is still alive by periodically calling
renewLease(). If a certain amount of time passes since
the last call to renewLease(), the NameNode assumes the
client has died.]]>
</doc>
</method>
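<!-- A hedged sketch of a lease-renewal loop at the ClientProtocol level. In practice
     DFSClient runs an equivalent background thread; the proxy, client name and renewal
     interval here are assumptions for illustration only.

     import org.apache.hadoop.hdfs.protocol.ClientProtocol;

     public class LeaseRenewerSketch implements Runnable {
       private final ClientProtocol namenode;
       private final String clientName;

       LeaseRenewerSketch(ClientProtocol namenode, String clientName) {
         this.namenode = namenode;
         this.clientName = clientName;
       }

       public void run() {
         try {
           while (!Thread.currentThread().isInterrupted()) {
             // Tell the namenode this client is still alive so its
             // open files and locks are not revoked.
             namenode.renewLease(clientName);
             Thread.sleep(30 * 1000L);  // assumed interval, well within the lease period
           }
         } catch (Exception e) {
           // On failure the namenode will eventually expire the lease.
         }
       }
     }
-->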
<method name="getStats" return="long[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get a set of statistics about the filesystem.
Right now, six values are returned.
<ul>
<li> [0] contains the total storage capacity of the system, in bytes.</li>
<li> [1] contains the total used space of the system, in bytes.</li>
<li> [2] contains the available storage of the system, in bytes.</li>
<li> [3] contains number of under replicated blocks in the system.</li>
<li> [4] contains number of blocks with a corrupt replica. </li>
<li> [5] contains number of blocks without any good replicas left. </li>
</ul>
Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
actual numbers to index into the array.]]>
</doc>
</method>
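<!-- A sketch that indexes the long[] returned by getStats() using the public index constants
     declared further down on this interface. The 'namenode' ClientProtocol proxy is assumed
     to be already available.

     import org.apache.hadoop.hdfs.protocol.ClientProtocol;

     public class FsStatsSketch {
       static void printStats(ClientProtocol namenode) throws Exception {
         long[] stats = namenode.getStats();
         System.out.println("capacity  : " + stats[ClientProtocol.GET_STATS_CAPACITY_IDX]);
         System.out.println("used      : " + stats[ClientProtocol.GET_STATS_USED_IDX]);
         System.out.println("remaining : " + stats[ClientProtocol.GET_STATS_REMAINING_IDX]);
         System.out.println("under-replicated blocks  : "
             + stats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
         System.out.println("blocks with corrupt replicas : "
             + stats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]);
         System.out.println("missing blocks           : "
             + stats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]);
       }
     }
-->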
<method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get a report on the system's current datanodes.
One DatanodeInfo object is returned for each DataNode.
Returns live datanodes if type is LIVE, dead datanodes if type is DEAD,
or all datanodes if type is ALL.]]>
</doc>
</method>
<method name="getPreferredBlockSize" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="filename" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the block size for the given file.
@param filename The name of the file
@return The number of bytes in each block
@throws IOException]]>
</doc>
</method>
<method name="setSafeMode" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Enter, leave or get safe mode.
<p>
Safe mode is a name node state when it
<ol><li>does not accept changes to name space (read-only), and</li>
<li>does not replicate or delete blocks.</li></ol>
<p>
Safe mode is entered automatically at name node startup.
Safe mode can also be entered manually using
{@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}.
<p>
At startup the name node accepts data node reports collecting
information about block locations.
In order to leave safe mode it needs to collect a configurable
percentage of blocks, called the threshold, that satisfy the minimal
replication condition.
The minimal replication condition is that each block must have at least
<tt>dfs.replication.min</tt> replicas.
When the threshold is reached the name node extends safe mode
for a configurable amount of time
to let the remaining data nodes check in before it
starts replicating missing blocks.
Then the name node leaves safe mode.
<p>
If safe mode is turned on manually using
{@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
then the name node stays in safe mode until it is manually turned off
using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
Current state of the name node can be verified using
{@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
<h4>Configuration parameters:</h4>
<tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
<tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
<tt>dfs.replication.min</tt> is the minimal replication parameter.
<h4>Special cases:</h4>
The name node does not enter safe mode at startup if the threshold is
set to 0 or if the name space is empty.<br>
If the threshold is set to 1 then all blocks need to have at least
minimal replication.<br>
If the threshold value is greater than 1 then the name node will not be
able to turn off safe mode automatically.<br>
Safe mode can always be turned off manually.
@param action <ul> <li>0 leave safe mode;</li>
<li>1 enter safe mode;</li>
<li>2 get safe mode state.</li></ul>
@return <ul><li>0 if the safe mode is OFF or</li>
<li>1 if the safe mode is ON.</li></ul>
@throws IOException]]>
</doc>
</method>
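<!-- A sketch of querying and toggling safe mode with the SafeModeAction values described
     above. The 'namenode' proxy is an assumption; administrators normally reach the same
     calls through the 'hadoop dfsadmin -safemode' command.

     import org.apache.hadoop.hdfs.protocol.ClientProtocol;
     import org.apache.hadoop.hdfs.protocol.FSConstants;

     public class SafeModeSketch {
       static void checkAndLeave(ClientProtocol namenode) throws Exception {
         // Query the current state without changing it.
         boolean on = namenode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
         System.out.println("safe mode is " + (on ? "ON" : "OFF"));

         // Leave safe mode manually (requires the appropriate privileges).
         if (on) {
           namenode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
         }
       }
     }
-->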
<method name="saveNamespace"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Save namespace image.
<p>
Saves the current namespace into the storage directories and resets the edits log.
Requires superuser privilege and safe mode.
@throws AccessControlException if the superuser privilege is violated.
@throws IOException if image creation failed.]]>
</doc>
</method>
<method name="refreshNodes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Tells the namenode to reread the hosts and exclude files.
@throws IOException]]>
</doc>
</method>
<method name="finalizeUpgrade"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Finalize previous upgrade.
Remove file system state saved during the upgrade.
The upgrade will become irreversible.
@throws IOException]]>
</doc>
</method>
<method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Report distributed upgrade progress or force current upgrade to proceed.
@param action {@link FSConstants.UpgradeAction} to perform
@return upgrade status information or null if no upgrades are in progress
@throws IOException]]>
</doc>
</method>
<method name="metaSave"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="filename" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Dumps namenode data structures into the specified file. If the file
already exists, the data is appended.
@throws IOException]]>
</doc>
</method>
<method name="getFileInfo" return="org.apache.hadoop.fs.FileStatus"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the file info for a specific file or directory.
@param src The string representation of the path to the file
@throws IOException if permission to access file is denied by the system
@return object containing information regarding the file
or null if file not found]]>
</doc>
</method>
<method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get {@link ContentSummary} rooted at the specified directory.
@param path The string representation of the path]]>
</doc>
</method>
<method name="setQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
<param name="namespaceQuota" type="long"/>
<param name="diskspaceQuota" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the quota for a directory.
@param path The string representation of the path to the directory
@param namespaceQuota Limit on the number of names in the tree rooted
at the directory
@param diskspaceQuota Limit on the disk space occupied by all the files under
this directory.
<br><br>
The quota can have three types of values : (1) 0 or more will set
the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies
the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET}
implies the quota will be reset. Any other value is a runtime error.
@throws FileNotFoundException if the path is a file or
does not exist
@throws QuotaExceededException if the directory size
is greater than the given quota]]>
</doc>
</method>
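<!-- A sketch of the three kinds of quota values described above. The 'namenode' proxy and
     the directory path are assumptions; administrators normally set quotas through the
     'hadoop dfsadmin' quota commands rather than calling this protocol directly.

     import org.apache.hadoop.hdfs.protocol.ClientProtocol;
     import org.apache.hadoop.hdfs.protocol.FSConstants;

     public class QuotaSketch {
       static void applyQuotas(ClientProtocol namenode) throws Exception {
         String dir = "/user/project";  // placeholder directory

         // Cap the namespace at 10000 names, leave the disk-space quota untouched.
         namenode.setQuota(dir, 10000L, FSConstants.QUOTA_DONT_SET);

         // Later: clear the namespace quota, cap disk usage at 1 TB.
         namenode.setQuota(dir, FSConstants.QUOTA_RESET, 1024L * 1024 * 1024 * 1024);
       }
     }
-->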
<method name="fsync"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="client" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Write all metadata for this file into persistent storage.
The file must be currently open for writing.
@param src The string representation of the path
@param client The string representation of the client]]>
</doc>
</method>
<method name="setTimes"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="java.lang.String"/>
<param name="mtime" type="long"/>
<param name="atime" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Sets the modification and access time of the file to the specified time.
@param src The string representation of the path
@param mtime The number of milliseconds since Jan 1, 1970.
Setting mtime to -1 means that modification time should not be set
by this call.
@param atime The number of milliseconds since Jan 1, 1970.
Setting atime to -1 means that access time should not be set
by this call.]]>
</doc>
</method>
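<!-- A sketch of the -1 convention described above, shown through the FileSystem-level
     setTimes() documented earlier in this file for DistributedFileSystem. The URI and path
     are placeholders.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;

     public class SetTimesSketch {
       public static void main(String[] args) throws Exception {
         // Placeholder URI and path.
         FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:9000/"), new Configuration());
         Path file = new Path("/user/example.txt");

         // Set the modification time to "now" and leave the access time unchanged (-1).
         fs.setTimes(file, System.currentTimeMillis(), -1L);
         fs.close();
       }
     }
-->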
<field name="versionID" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Compared to the previous version the following changes have been introduced:
(Only the latest change is reflected.
The log of historical changes can be retrieved from the svn).
41: saveNamespace introduced.]]>
</doc>
</field>
<field name="GET_STATS_CAPACITY_IDX" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="GET_STATS_USED_IDX" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="GET_STATS_REMAINING_IDX" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="GET_STATS_UNDER_REPLICATED_IDX" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="GET_STATS_CORRUPT_BLOCKS_IDX" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="GET_STATS_MISSING_BLOCKS_IDX" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[ClientProtocol is used by user code via
{@link org.apache.hadoop.hdfs.DistributedFileSystem} class to communicate
with the NameNode. User code can manipulate the directory namespace,
as well as open/close file streams, etc.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
<!-- start class org.apache.hadoop.hdfs.protocol.DatanodeID -->
<class name="DatanodeID" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.io.WritableComparable"/>
<constructor name="DatanodeID"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Equivalent to DatanodeID("").]]>
</doc>
</constructor>
<constructor name="DatanodeID" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Equivalent to DatanodeID(nodeName, "", -1, -1).]]>
</doc>
</constructor>
<constructor name="DatanodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[DatanodeID copy constructor
@param from]]>
</doc>
</constructor>
<constructor name="DatanodeID" type="java.lang.String, java.lang.String, int, int"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Create DatanodeID
@param nodeName (hostname:portNumber)
@param storageID data storage ID
@param infoPort info server port
@param ipcPort ipc server port]]>
</doc>
</constructor>
<method name="getName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return hostname:portNumber.]]>
</doc>
</method>
<method name="getStorageID" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return data storage ID.]]>
</doc>
</method>
<method name="getInfoPort" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return infoPort (the port to which the HTTP server is bound)]]>
</doc>
</method>
<method name="getIpcPort" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return ipcPort (the port to which the IPC server is bound)]]>
</doc>
</method>
<method name="setStorageID"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="storageID" type="java.lang.String"/>
<doc>
<![CDATA[sets the data storage ID.]]>
</doc>
</method>
<method name="getHost" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the hostname only, without the :portNumber.]]>
</doc>
</method>
<method name="getPort" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="to" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="updateRegInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="nodeReg" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
<doc>
<![CDATA[Update fields when a new registration request comes in.
Note that this does not update storageID.]]>
</doc>
</method>
<method name="compareTo" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="that" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
<doc>
<![CDATA[Comparable.
The basis of comparison is the String name (host:portNumber) only.
@param that
@return as specified by Comparable.]]>
</doc>
</method>
<method name="write"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.DataOutput"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<method name="readFields"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.DataInput"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<field name="EMPTY_ARRAY" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="name" type="java.lang.String"
transient="false" volatile="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</field>
<field name="storageID" type="java.lang.String"
transient="false" volatile="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</field>
<field name="infoPort" type="int"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<field name="ipcPort" type="int"
transient="false" volatile="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[DatanodeID is composed of the data node
name (hostname:portNumber) and the ID of the data storage
it currently represents.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.DatanodeID -->
<!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
<class name="DatanodeInfo" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.net.Node"/>
<constructor name="DatanodeInfo"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</constructor>
<method name="getCapacity" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The raw capacity.]]>
</doc>
</method>
<method name="getDfsUsed" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The used space by the data node.]]>
</doc>
</method>
<method name="getNonDfsUsed" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The non-DFS space used by the data node.]]>
</doc>
</method>
<method name="getDfsUsedPercent" return="float"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The used space by the data node as percentage of present capacity]]>
</doc>
</method>
<method name="getRemaining" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The raw free space.]]>
</doc>
</method>
<method name="getRemainingPercent" return="float"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The remaining space as percentage of configured capacity.]]>
</doc>
</method>
<method name="getLastUpdate" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The time when this information was accurate.]]>
</doc>
</method>
<method name="getXceiverCount" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[number of active connections]]>
</doc>
</method>
<method name="setCapacity"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="capacity" type="long"/>
<doc>
<![CDATA[Sets raw capacity.]]>
</doc>
</method>
<method name="setRemaining"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="remaining" type="long"/>
<doc>
<![CDATA[Sets raw free space.]]>
</doc>
</method>
<method name="setLastUpdate"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="lastUpdate" type="long"/>
<doc>
<![CDATA[Sets time when this information was accurate.]]>
</doc>
</method>
<method name="setXceiverCount"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="xceiverCount" type="int"/>
<doc>
<![CDATA[Sets number of active connections]]>
</doc>
</method>
<method name="getNetworkLocation" return="java.lang.String"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[rack name]]>
</doc>
</method>
<method name="setNetworkLocation"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="location" type="java.lang.String"/>
<doc>
<![CDATA[Sets the rack name]]>
</doc>
</method>
<method name="getHostName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="setHostName"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="host" type="java.lang.String"/>
</method>
<method name="getDatanodeReport" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[A formatted string for reporting the status of the DataNode.]]>
</doc>
</method>
<method name="dumpDatanode" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[A formatted string for printing the status of the DataNode.]]>
</doc>
</method>
<method name="startDecommission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Start decommissioning a node.]]>
</doc>
</method>
<method name="stopDecommission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Stop decommissioning a node.]]>
</doc>
</method>
<method name="isDecommissionInProgress" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns true if the node is in the process of being decommissioned]]>
</doc>
</method>
<method name="isDecommissioned" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Returns true if the node has been decommissioned.]]>
</doc>
</method>
<method name="setDecommissioned"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Sets the admin state to indicate that decommissioning is complete.]]>
</doc>
</method>
<method name="setAdminState"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<param name="newState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"/>
<doc>
<![CDATA[Sets the admin state of this node.]]>
</doc>
</method>
<method name="getParent" return="org.apache.hadoop.net.Node"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return this node's parent]]>
</doc>
</method>
<method name="setParent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="parent" type="org.apache.hadoop.net.Node"/>
</method>
<method name="getLevel" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return this node's level in the tree.
E.g. the root of a tree returns 0 and its children return 1]]>
</doc>
</method>
<method name="setLevel"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="level" type="int"/>
</method>
<method name="write"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.DataOutput"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<method name="readFields"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.DataInput"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[{@inheritDoc}]]>
</doc>
</method>
<field name="capacity" type="long"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<field name="dfsUsed" type="long"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<field name="remaining" type="long"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<field name="lastUpdate" type="long"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<field name="xceiverCount" type="int"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<field name="location" type="java.lang.String"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<field name="hostName" type="java.lang.String"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
<doc>
<![CDATA[Hostname as supplied by the datanode during registration as its
name. The namenode uses the datanode's IP address as the name.]]>
</doc>
</field>
<field name="adminState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
transient="false" volatile="false"
static="false" final="false" visibility="protected"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[DatanodeInfo represents the status of a DataNode.
This object is used for communication in the
Datanode Protocol and the Client Protocol.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
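<!-- Illustrative usage (a minimal sketch, in Java): iterating over DatanodeInfo
objects and reading the accessors documented above. It assumes
org.apache.hadoop.hdfs.DistributedFileSystem#getDataNodeStats() from this
release; the class name DatanodeStatusExample is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class DatanodeStatusExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      for (DatanodeInfo node : dfs.getDataNodeStats()) {
        // Accessors listed in the class description above.
        System.out.println(node.getHostName() + " at " + node.getNetworkLocation());
        System.out.println(node.getDatanodeReport());
        if (node.isDecommissioned()) {
          System.out.println("node has been decommissioned");
        }
      }
    }
    fs.close();
  }
}
-->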
<!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
<class name="DatanodeInfo.AdminStates" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<field name="NORMAL" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="DECOMMISSION_INPROGRESS" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="DECOMMISSIONED" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
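<!-- Illustrative usage (a minimal sketch, in Java): only the values()/valueOf()
methods and the constants listed above are used; the class name
AdminStatesExample is purely illustrative.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class AdminStatesExample {
  public static void main(String[] args) {
    // Enumerate every admin state.
    for (DatanodeInfo.AdminStates s : DatanodeInfo.AdminStates.values()) {
      System.out.println(s);
    }
    // Parse a state name back into its enum constant.
    DatanodeInfo.AdminStates state =
        DatanodeInfo.AdminStates.valueOf("DECOMMISSION_INPROGRESS");
    System.out.println("decommissioning: " +
        (state == DatanodeInfo.AdminStates.DECOMMISSION_INPROGRESS));
  }
}
-->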
<!-- start interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
<interface name="DataTransferProtocol" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<field name="DATA_TRANSFER_VERSION" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Version for data transfers between clients and datanodes.
This should also change when the serialization of DatanodeInfo changes,
not just when the protocol itself changes; that dependency is easy to overlook.]]>
</doc>
</field>
<field name="OP_WRITE_BLOCK" type="byte"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="OP_READ_BLOCK" type="byte"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="OP_READ_METADATA" type="byte"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="OP_REPLACE_BLOCK" type="byte"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="OP_COPY_BLOCK" type="byte"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="OP_BLOCK_CHECKSUM" type="byte"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="OP_STATUS_SUCCESS" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="OP_STATUS_ERROR" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="OP_STATUS_ERROR_CHECKSUM" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="OP_STATUS_ERROR_INVALID" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="OP_STATUS_ERROR_EXISTS" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="OP_STATUS_CHECKSUM_OK" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Clients transfer data to and from datanodes using a streaming protocol.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
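<!-- Illustrative usage (a minimal sketch, in Java): under the assumption that
each request in this streaming protocol begins with DATA_TRANSFER_VERSION
followed by a one-byte operation code, this shows how such a header could be
written. The per-operation payload that follows is intentionally omitted, and
the helper name writeOpHeader is purely illustrative.

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;

public class OpHeaderSketch {
  static void writeOpHeader(OutputStream raw, byte op) throws IOException {
    DataOutputStream out = new DataOutputStream(raw);
    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); // protocol version
    out.writeByte(op);                                          // e.g. OP_READ_BLOCK
    out.flush();
  }
}
-->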
<!-- start interface org.apache.hadoop.hdfs.protocol.FSConstants -->
<interface name="FSConstants" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<field name="MIN_BLOCKS_FOR_WRITE" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="BLOCK_INVALIDATE_CHUNK" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="QUOTA_DONT_SET" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="QUOTA_RESET" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="HEARTBEAT_INTERVAL" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="BLOCKREPORT_INTERVAL" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="BLOCKREPORT_INITIAL_DELAY" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="LEASE_SOFTLIMIT_PERIOD" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="LEASE_HARDLIMIT_PERIOD" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="LEASE_RECOVER_PERIOD" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="MAX_PATH_LENGTH" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="MAX_PATH_DEPTH" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="BUFFER_SIZE" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="SMALL_BUFFER_SIZE" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="DEFAULT_BLOCK_SIZE" type="long"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="DEFAULT_DATA_SOCKET_SIZE" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="SIZE_OF_INTEGER" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="LAYOUT_VERSION" type="int"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Some handy constants]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.protocol.FSConstants -->
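<!-- Illustrative usage (a minimal sketch, in Java): the fields above are plain
public static final constants and can be read directly; the class name
FsConstantsExample is purely illustrative.

import org.apache.hadoop.hdfs.protocol.FSConstants;

public class FsConstantsExample {
  public static void main(String[] args) {
    System.out.println("default block size: " + FSConstants.DEFAULT_BLOCK_SIZE);
    System.out.println("heartbeat interval: " + FSConstants.HEARTBEAT_INTERVAL);
    System.out.println("layout version:     " + FSConstants.LAYOUT_VERSION);
  }
}
-->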
<!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
<class name="FSConstants.DatanodeReportType" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<field name="ALL" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="LIVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="DEAD" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
<!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
<class name="FSConstants.SafeModeAction" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<field name="SAFEMODE_LEAVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="SAFEMODE_ENTER" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="SAFEMODE_GET" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
<!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
<class name="FSConstants.UpgradeAction" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<field name="GET_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="DETAILED_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="FORCE_PROCEED" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Distributed upgrade actions:
1. Get upgrade status.
2. Get detailed upgrade status.
3. Proceed with the upgrade if it is stuck, no matter what the status is.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
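<!-- Illustrative usage (a minimal sketch, in Java): querying distributed
upgrade status with the actions above. It assumes
DistributedFileSystem#distributedUpgradeProgress(UpgradeAction) and
org.apache.hadoop.hdfs.server.common.UpgradeStatusReport from this release;
the class name UpgradeStatusExample is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;

public class UpgradeStatusExample {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    UpgradeStatusReport report =
        dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
    System.out.println(report == null ? "no upgrade in progress" : report.toString());
    dfs.close();
  }
}
-->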
<!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
<class name="LocatedBlock" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.io.Writable"/>
<constructor name="LocatedBlock"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long, boolean"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getLocations" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getStartOffset" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBlockSize" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isCorrupt" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="write"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="out" type="java.io.DataOutput"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="readFields"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="in" type="java.io.DataInput"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<doc>