<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<!-- Generated by the JDiff Javadoc doclet -->
<!-- (http://www.jdiff.org) -->
<!-- on Mon Mar 30 15:30:43 PDT 2015 -->
<api
xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
xsi:noNamespaceSchemaLocation='api.xsd'
name="hadoop-hdfs 2.6.0"
jdversion="1.0.9">
<!-- Command line arguments = -doclet org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet -docletpath /Users/llu/hadoop-common/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/llu/hadoop-common/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /Users/llu/hadoop-common/hadoop-hdfs-project/hadoop-hdfs/target/classes:/Users/llu/hadoop-common/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.6.0.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_67.jdk/Contents/Home/lib/tools.jar:/Users/llu/hadoop-common/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.6.0.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-api/1.7.5/slf4j-api-1.7.5.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/llu/.m2/repository/org/apache/curator/curator-framework/2.6.0/curator-framework-2.6.0.jar:/Users/llu/hadoop-common/hadoop-common-project/hadoop-common/target/hadoop-common-2.6.0.jar:/Users/llu/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/llu/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/llu/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/llu/.m2/repository/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/llu/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/llu/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/llu/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/llu/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/llu/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/llu/.m2/repository/tomcat/jasper-compiler/5.5.23/jasper-compiler-5.5.23.jar:/Users/llu/.m2/repository/commons-el/commons-el/1.0/commons-el-1.0.jar:/Users/llu/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/llu/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/llu/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/llu/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/llu/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/llu/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/llu/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/llu/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/llu/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/llu/.m2/repository/org/apache/curator/curator-client/2.6.0/curator-client-2.6.0.jar:/Users/llu/.m2/repository/org/apache/curator/curator-recipes/2.6.0/curator-recipes-2.6.0.jar:/Users/llu/.m2/repository/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar:/Users/llu/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/llu/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/llu/.m2/repository/jline/jline/0.9.94/jline-0.9.94.jar:/Users/llu/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/llu/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/llu/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/llu/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/llu/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/llu/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/llu/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/llu/.m2/repository/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/Users/llu/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/llu/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/llu/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/llu/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-log4j12/1.7.5/slf4j-log4j12-1.7.5.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/llu/.m2/repository/tomcat/jasper-runtime/5.5.23/jasper-runtime-5.5.23.jar:/Users/llu/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/llu/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/llu/.m2/repository/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/Users/llu/.m2/repository/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/Users/llu/.m2/repository/org/htrace/htrace-core/3.0.4/htrace-core-3.0.4.jar -sourcepath /Users/llu/hadoop-common/hadoop-hdfs-project/hadoop-hdfs/src/main/java -apidir /Users/llu/hadoop-common/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname hadoop-core 2.6.0 -->
<package name="org.apache.hadoop.fs">
<!-- start class org.apache.hadoop.fs.BlockStorageLocation -->
<class name="BlockStorageLocation" extends="org.apache.hadoop.fs.BlockLocation"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="BlockStorageLocation" type="org.apache.hadoop.fs.BlockLocation, org.apache.hadoop.fs.VolumeId[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="getVolumeIds" return="org.apache.hadoop.fs.VolumeId[]"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Gets the list of {@link VolumeId} corresponding to the block's replicas.
@return the list of VolumeId for the block's replicas]]>
</doc>
</method>
<doc>
<![CDATA[Wrapper for {@link BlockLocation} that also adds {@link VolumeId} volume
location information for each replica.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.fs.BlockStorageLocation -->
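<!-- Usage sketch: reading per-replica volume information with the class above.
A minimal example, assuming a DistributedFileSystem handle `dfs` and that
dfs.datanode.hdfs-blocks-metadata.enabled is set on the datanodes.

  BlockLocation[] locs = dfs.getFileBlockLocations(path, 0, length);
  BlockStorageLocation[] storageLocs =
      dfs.getFileBlockStorageLocations(java.util.Arrays.asList(locs));
  for (BlockStorageLocation loc : storageLocs) {
    for (VolumeId id : loc.getVolumeIds()) {
      System.out.println(loc + " on volume " + id);  // volume of each replica
    }
  }
-->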
<!-- start class org.apache.hadoop.fs.CacheFlag -->
<class name="CacheFlag" extends="java.lang.Enum"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.fs.CacheFlag[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.fs.CacheFlag"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<doc>
<![CDATA[Specifies semantics for CacheDirective operations. Multiple flags can
be combined in an EnumSet.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.fs.CacheFlag -->
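<!-- Usage sketch: combining CacheFlag values in an EnumSet, as the class doc
above describes; assumes the FORCE constant present in this release.

  java.util.EnumSet<CacheFlag> flags = java.util.EnumSet.of(CacheFlag.FORCE);
  java.util.EnumSet<CacheFlag> none = java.util.EnumSet.noneOf(CacheFlag.class);
  // Such sets are passed to the CacheDirective operations, e.g.
  // dfs.addCacheDirective(directiveInfo, flags) on DistributedFileSystem.
-->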
<!-- start class org.apache.hadoop.fs.HdfsVolumeId -->
<class name="HdfsVolumeId" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.fs.VolumeId"/>
<constructor name="HdfsVolumeId" type="byte[]"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="compareTo" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="arg0" type="org.apache.hadoop.fs.VolumeId"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[HDFS-specific volume identifier which implements {@link VolumeId}. Can be
used to differentiate between the data directories on a single datanode. This
identifier is only unique on a per-datanode basis.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.fs.HdfsVolumeId -->
<!-- start interface org.apache.hadoop.fs.VolumeId -->
<interface name="VolumeId" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="java.lang.Comparable"/>
<method name="compareTo" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="arg0" type="org.apache.hadoop.fs.VolumeId"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="obj" type="java.lang.Object"/>
</method>
<doc>
<![CDATA[Opaque interface that identifies a disk location. Subclasses
should implement {@link Comparable} and override both equals and hashCode.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.fs.VolumeId -->
<!-- start class org.apache.hadoop.fs.XAttr.Builder -->
<class name="XAttr.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="XAttr.Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="setNameSpace" return="org.apache.hadoop.fs.XAttr.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ns" type="org.apache.hadoop.fs.XAttr.NameSpace"/>
</method>
<method name="setName" return="org.apache.hadoop.fs.XAttr.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="setValue" return="org.apache.hadoop.fs.XAttr.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="value" type="byte[]"/>
</method>
<method name="build" return="org.apache.hadoop.fs.XAttr"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.fs.XAttr.Builder -->
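<!-- Usage sketch: building an XAttr with the fluent builder above; the USER
namespace constant is assumed from the XAttr.NameSpace enum that follows.

  XAttr attr = new XAttr.Builder()
      .setNameSpace(XAttr.NameSpace.USER)  // which extended-attribute namespace
      .setName("checksum")                 // attribute name
      .setValue(new byte[] {0x01, 0x02})   // raw attribute value
      .build();
-->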
<!-- start class org.apache.hadoop.fs.XAttr.NameSpace -->
<class name="XAttr.NameSpace" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.fs.XAttr.NameSpace[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.fs.XAttr.NameSpace"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.fs.XAttr.NameSpace -->
</package>
<package name="org.apache.hadoop.hdfs">
<!-- start interface org.apache.hadoop.hdfs.BlockReader -->
<interface name="BlockReader" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.fs.ByteBufferReadable"/>
<method name="read" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="buf" type="byte[]"/>
<param name="off" type="int"/>
<param name="len" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
</method>
<method name="skip" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="n" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Skip the given number of bytes]]>
</doc>
</method>
<method name="available" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns an estimate of the number of bytes that can be read
(or skipped over) from this input stream without performing
network I/O.
This may return more than what is actually present in the block.]]>
</doc>
</method>
<method name="close"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Close the block reader.
@throws IOException]]>
</doc>
</method>
<method name="readFully"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="buf" type="byte[]"/>
<param name="readOffset" type="int"/>
<param name="amtToRead" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Read exactly the given amount of data, throwing an exception
if EOF is reached before that amount]]>
</doc>
</method>
<method name="readAll" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="buf" type="byte[]"/>
<param name="offset" type="int"/>
<param name="len" type="int"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Similar to {@link #readFully(byte[], int, int)} except that it will
not throw an exception on EOF. However, it differs from the simple
{@link #read(byte[], int, int)} call in that it is guaranteed to
read the data if it is available. In other words, if this call
does not throw an exception, then either the buffer has been
filled or the next call will return EOF.]]>
</doc>
</method>
<method name="isLocal" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return true only if this is a local read.]]>
</doc>
</method>
<method name="isShortCircuit" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return true only if this is a short-circuit read.
All short-circuit reads are also local.]]>
</doc>
</method>
<method name="getClientMmap" return="org.apache.hadoop.hdfs.shortcircuit.ClientMmap"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="opts" type="java.util.EnumSet"/>
<doc>
<![CDATA[Get a ClientMmap object for this BlockReader.
@param opts The read options to use.
@return The ClientMmap object, or null if mmap is not
supported.]]>
</doc>
</method>
<doc>
<![CDATA[A BlockReader is responsible for reading a single block
from a single datanode.]]>
</doc>
</interface>
<!-- end interface org.apache.hadoop.hdfs.BlockReader -->
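<!-- Usage sketch: the readFully/readAll contract described above, against a
hypothetical BlockReader instance `reader`.

  byte[] buf = new byte[8192];
  reader.readFully(buf, 0, buf.length);        // throws if EOF comes first
  int n = reader.readAll(buf, 0, buf.length);  // tolerates EOF
  if (n < buf.length) {
    // EOF was reached: the next readAll call would report EOF, not throw.
  }
  reader.close();
-->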
<!-- start class org.apache.hadoop.hdfs.BlockReaderFactory.BlockReaderPeer -->
<class name="BlockReaderFactory.BlockReaderPeer" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</class>
<!-- end class org.apache.hadoop.hdfs.BlockReaderFactory.BlockReaderPeer -->
<!-- start class org.apache.hadoop.hdfs.CorruptFileBlockIterator -->
<class name="CorruptFileBlockIterator" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<implements name="org.apache.hadoop.fs.RemoteIterator"/>
<constructor name="CorruptFileBlockIterator" type="org.apache.hadoop.hdfs.DFSClient, org.apache.hadoop.fs.Path"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="getCallsMade" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return the number of calls made to the DFSClient.
This is for debugging and testing purposes.]]>
</doc>
</method>
<method name="hasNext" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="next" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</method>
<doc>
<![CDATA[Provides an iterator interface for listCorruptFileBlocks.
This class is used by DistributedFileSystem and Hdfs.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.CorruptFileBlockIterator -->
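<!-- Usage sketch: draining the iterator; it is normally obtained from
DistributedFileSystem#listCorruptFileBlocks(Path) (assumed entry point)
rather than constructed directly.

  RemoteIterator<Path> it = dfs.listCorruptFileBlocks(new Path("/"));
  while (it.hasNext()) {
    Path corrupt = it.next();  // may throw IOException on an RPC failure
    System.out.println(corrupt);
  }
-->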
<!-- start class org.apache.hadoop.hdfs.DFSClient.Conf -->
<class name="DFSClient.Conf" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DFSClient.Conf" type="org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="isUseLegacyBlockReaderLocal" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDomainSocketPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isShortCircuitLocalReads" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isDomainSocketDataTraffic" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[DFSClient configuration]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSClient.Conf -->
<!-- start class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
<class name="DFSClient.DFSDataInputStream" extends="org.apache.hadoop.hdfs.client.HdfsDataInputStream"
abstract="false"
static="true" final="false" visibility="public"
deprecated="use {@link HdfsDataInputStream} instead.">
<constructor name="DFSClient.DFSDataInputStream" type="org.apache.hadoop.hdfs.DFSInputStream"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<doc>
<![CDATA[@deprecated use {@link HdfsDataInputStream} instead.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
<!-- start class org.apache.hadoop.hdfs.DFSHedgedReadMetrics -->
<class name="DFSHedgedReadMetrics" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DFSHedgedReadMetrics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="incHedgedReadOps"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incHedgedReadOpsInCurThread"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="incHedgedReadWins"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getHedgedReadOps" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getHedgedReadOpsInCurThread" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getHedgedReadWins" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="hedgedReadOps" type="java.util.concurrent.atomic.AtomicLong"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="hedgedReadOpsWin" type="java.util.concurrent.atomic.AtomicLong"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="hedgedReadOpsInCurThread" type="java.util.concurrent.atomic.AtomicLong"
transient="false" volatile="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[The client-side metrics for the hedged read feature.
This class has a number of metrics variables that are publicly accessible,
so clients such as HBase can grab them from the client side.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSHedgedReadMetrics -->
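<!-- Usage sketch: sampling the public counters above, however the metrics
instance is obtained (a client-side accessor is assumed here).

  long ops = metrics.getHedgedReadOps();               // hedged reads started
  long wins = metrics.getHedgedReadWins();             // hedged read won the race
  long inline = metrics.getHedgedReadOpsInCurThread(); // ran in calling thread
  System.out.println("hedged=" + ops + " wins=" + wins + " inline=" + inline);
-->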
<!-- start class org.apache.hadoop.hdfs.DFSInotifyEventInputStream -->
<class name="DFSInotifyEventInputStream" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="poll" return="org.apache.hadoop.hdfs.inotify.Event"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<exception name="MissingEventsException" type="org.apache.hadoop.hdfs.inotify.MissingEventsException"/>
<doc>
<![CDATA[Returns the next event in the stream or null if no new events are currently
available.
@throws IOException because of network error or edit log
corruption. Also possible if JournalNodes are unresponsive in the
QJM setting (even one unresponsive JournalNode is enough in rare cases),
so catching this exception and retrying at least a few times is
recommended.
@throws MissingEventsException if we cannot return the next event in the
stream because the data for the event (and possibly some subsequent events)
has been deleted (generally because this stream is a very large number of
events behind the current state of the NameNode). It is safe to continue
reading from the stream after this exception is thrown -- the next
available event will be returned.]]>
</doc>
</method>
<method name="getEventsBehindEstimate" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Return an estimate of how many events behind the NameNode's current state
this stream is. Clients should periodically call this method and check if
its result is steadily increasing, which indicates that they are falling
behind (i.e. events are being generated faster than the client is reading
them). If a client falls too far behind events may be deleted before the
client can read them.
<p/>
A return value of -1 indicates that an estimate could not be produced, and
should be ignored. The value returned by this method is really only useful
when compared to previous or subsequent returned values.]]>
</doc>
</method>
<method name="poll" return="org.apache.hadoop.hdfs.inotify.Event"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="time" type="long"/>
<param name="tu" type="java.util.concurrent.TimeUnit"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<exception name="MissingEventsException" type="org.apache.hadoop.hdfs.inotify.MissingEventsException"/>
<doc>
<![CDATA[Returns the next event in the stream, waiting up to the specified amount of
time for a new event. Returns null if a new event is not available at the
end of the specified amount of time. The time before the method returns may
exceed the specified amount of time by up to the time required for an RPC
to the NameNode.
@param time number of units of the given TimeUnit to wait
@param tu the desired TimeUnit
@throws IOException see {@link DFSInotifyEventInputStream#poll()}
@throws MissingEventsException
see {@link DFSInotifyEventInputStream#poll()}
@throws InterruptedException if the calling thread is interrupted]]>
</doc>
</method>
<method name="take" return="org.apache.hadoop.hdfs.inotify.Event"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<exception name="InterruptedException" type="java.lang.InterruptedException"/>
<exception name="MissingEventsException" type="org.apache.hadoop.hdfs.inotify.MissingEventsException"/>
<doc>
<![CDATA[Returns the next event in the stream, waiting indefinitely if a new event
is not immediately available.
@throws IOException see {@link DFSInotifyEventInputStream#poll()}
@throws MissingEventsException see
{@link DFSInotifyEventInputStream#poll()}
@throws InterruptedException if the calling thread is interrupted]]>
</doc>
</method>
<field name="LOG" type="org.slf4j.Logger"
transient="false" volatile="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Stream for reading inotify events. DFSInotifyEventInputStreams should not
be shared among multiple threads.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSInotifyEventInputStream -->
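<!-- Usage sketch: a single-threaded event loop, assuming the stream comes from
HdfsAdmin#getInotifyEventStream() and that the enclosing method declares
IOException and InterruptedException; `running` and `handle` are hypothetical.

  DFSInotifyEventInputStream events = hdfsAdmin.getInotifyEventStream();
  while (running) {
    try {
      Event event = events.take();  // blocks until the next event arrives
      handle(event);
    } catch (MissingEventsException e) {
      // Fell too far behind and events were lost; it is safe to keep
      // reading from the next available event.
    }
  }
-->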
<!-- start class org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics -->
<class name="DFSInputStream.ReadStatistics" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="DFSInputStream.ReadStatistics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="DFSInputStream.ReadStatistics" type="org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getTotalBytesRead" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The total bytes read. This will always be at least as
high as the other numbers, since it includes all of them.]]>
</doc>
</method>
<method name="getTotalLocalBytesRead" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The total local bytes read. This will always be at least
as high as totalShortCircuitBytesRead, since all short-circuit
reads are also local.]]>
</doc>
</method>
<method name="getTotalShortCircuitBytesRead" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The total short-circuit local bytes read.]]>
</doc>
</method>
<method name="getTotalZeroCopyBytesRead" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The total number of zero-copy bytes read.]]>
</doc>
</method>
<method name="getRemoteBytesRead" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The total number of bytes read which were not local.]]>
</doc>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics -->
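<!-- Usage sketch: reading the counters off an open stream, assuming an
HdfsDataInputStream `in` whose getReadStatistics() returns this class.

  DFSInputStream.ReadStatistics stats = in.getReadStatistics();
  long total = stats.getTotalBytesRead();
  long local = stats.getTotalLocalBytesRead();
  long shortCircuit = stats.getTotalShortCircuitBytesRead();
  long remote = stats.getRemoteBytesRead();  // equals total minus local
-->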
<!-- start class org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress -->
<class name="DFSUtil.ConfiguredNNAddress" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getNameserviceId" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getNamenodeId" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAddress" return="java.net.InetSocketAddress"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Represent one of the NameNodes configured in the cluster.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress -->
<!-- start class org.apache.hadoop.hdfs.ExtendedBlockId -->
<class name="ExtendedBlockId" extends="java.lang.Object"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<constructor name="ExtendedBlockId" type="long, java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="fromExtendedBlock" return="org.apache.hadoop.hdfs.ExtendedBlockId"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="block" type="org.apache.hadoop.hdfs.protocol.ExtendedBlock"/>
</method>
<method name="getBlockId" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getBlockPoolId" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[An immutable key which identifies a block.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.ExtendedBlockId -->
<!-- start class org.apache.hadoop.hdfs.HAUtil -->
<class name="HAUtil" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="isHAEnabled" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nsId" type="java.lang.String"/>
<doc>
<![CDATA[Returns true if HA for namenode is configured for the given nameservice
@param conf Configuration
@param nsId nameservice, or null if no federated NS is configured
@return true if HA is configured in the configuration; else false.]]>
</doc>
</method>
<method name="usesSharedEditsDir" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
<![CDATA[Returns true if HA is using a shared edits directory.
@param conf Configuration
@return true if HA config is using a shared edits dir, false otherwise.]]>
</doc>
</method>
<method name="getNameNodeId" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nsId" type="java.lang.String"/>
<doc>
<![CDATA[Get the namenode Id by matching the {@code addressKey}
with the address of the local node.
If {@link DFSConfigKeys#DFS_HA_NAMENODE_ID_KEY} is not specifically
configured, this method determines the namenode Id by matching the local
node's address with the configured addresses. When a match is found, it
returns the namenode Id from the corresponding configuration key.
@param conf Configuration
@return namenode Id on success, null on failure.
@throws HadoopIllegalArgumentException on error]]>
</doc>
</method>
<method name="getNameNodeIdFromAddress" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="address" type="java.net.InetSocketAddress"/>
<param name="keys" type="java.lang.String[]"/>
<doc>
<![CDATA[Similar to
{@link DFSUtil#getNameServiceIdFromAddress(Configuration,
InetSocketAddress, String...)}]]>
</doc>
</method>
<method name="getNameNodeIdOfOtherNode" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nsId" type="java.lang.String"/>
<doc>
<![CDATA[Get the NN ID of the other node in an HA setup.
@param conf the configuration of this node
@return the NN ID of the other node in this nameservice]]>
</doc>
</method>
<method name="getConfForOtherNode" return="org.apache.hadoop.conf.Configuration"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="myConf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
<![CDATA[Given the configuration for this node, return a Configuration object for
the other node in an HA setup.
@param myConf the configuration of this node
@return the configuration of the other node in an HA setup]]>
</doc>
</method>
<method name="shouldAllowStandbyReads" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<doc>
<![CDATA[This is used only by tests at the moment.
@return true if the NN should allow read operations while in standby mode.]]>
</doc>
</method>
<method name="setAllowStandbyReads"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="val" type="boolean"/>
</method>
<method name="isLogicalUri" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<doc>
<![CDATA[@return true if the given nameNodeUri appears to be a logical URI.]]>
</doc>
</method>
<method name="isClientFailoverConfigured" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<doc>
<![CDATA[Check whether the client has a failover proxy provider configured
for the namenode/nameservice.
@param conf Configuration
@param nameNodeUri The URI of namenode
@return true if failover is configured.]]>
</doc>
</method>
<method name="useLogicalUri" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Check whether logical URI is needed for the namenode and
the corresponding failover proxy provider in the config.
@param conf Configuration
@param nameNodeUri The URI of namenode
@return true if a logical URI is needed, false if not.
@throws IOException most likely due to misconfiguration.]]>
</doc>
</method>
<method name="getServiceUriFromToken" return="java.net.URI"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="scheme" type="java.lang.String"/>
<param name="token" type="org.apache.hadoop.security.token.Token"/>
<doc>
<![CDATA[Parse the file system URI out of the provided token.]]>
</doc>
</method>
<method name="buildTokenServiceForLogicalUri" return="org.apache.hadoop.io.Text"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="uri" type="java.net.URI"/>
<param name="scheme" type="java.lang.String"/>
<doc>
<![CDATA[Get the service name used in the delegation token for the given logical
HA service.
@param uri the logical URI of the cluster
@param scheme the scheme of the corresponding FileSystem
@return the service name]]>
</doc>
</method>
<method name="isTokenForLogicalUri" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="token" type="org.apache.hadoop.security.token.Token"/>
<doc>
<![CDATA[@return true if this token corresponds to a logical nameservice
rather than a specific namenode.]]>
</doc>
</method>
<method name="buildTokenServicePrefixForLogicalUri" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="scheme" type="java.lang.String"/>
</method>
<method name="cloneDelegationTokenForLogicalUri"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="haUri" type="java.net.URI"/>
<param name="nnAddrs" type="java.util.Collection"/>
<doc>
<![CDATA[Locate a delegation token associated with the given HA cluster URI, and if
one is found, clone it to also represent the underlying namenode address.
@param ugi the UGI to modify
@param haUri the logical URI for the cluster
@param nnAddrs collection of NNs in the cluster to which the token
applies]]>
</doc>
</method>
<method name="getAddressOfActive" return="java.net.InetSocketAddress"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the internet address of the currently-active NN. This should rarely be
used, since callers of this method who connect directly to the NN using the
resulting InetSocketAddress will not be able to connect to the active NN if
a failover were to occur after this method has been called.
@param fs the file system to get the active address of.
@return the internet address of the currently-active NN.
@throws IOException if an error occurs while resolving the active NN.]]>
</doc>
</method>
<method name="getProxiesForAllNameNodesInNameservice" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nsId" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
call should be made on every NN in an HA nameservice, not just the active.
@param conf configuration
@param nsId the nameservice to get all of the proxies for.
@return a list of RPC proxies for each NN in the nameservice.
@throws IOException in the event of error.]]>
</doc>
</method>
<method name="getProxiesForAllNameNodesInNameservice" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nsId" type="java.lang.String"/>
<param name="xface" type="java.lang.Class"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get an RPC proxy for each NN in an HA nameservice. Used when a given RPC
call should be made on every NN in an HA nameservice, not just the active.
@param conf configuration
@param nsId the nameservice to get all of the proxies for.
@param xface the protocol class.
@return a list of RPC proxies for each NN in the nameservice.
@throws IOException in the event of error.]]>
</doc>
</method>
<method name="isAtLeastOneActive" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="namenodes" type="java.util.List"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Used to ensure that at least one of the given HA NNs is currently in the
active state.
@param namenodes list of RPC proxies for each NN to check.
@return true if at least one NN is active, false if all are in the standby state.
@throws IOException in the event of error.]]>
</doc>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.HAUtil -->
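<!-- Usage sketch: fanning a call out to every NN in a nameservice, assuming
ClientProtocol as the RPC interface and "mycluster" as the nameservice id.

  List<NameNodeProxies.ProxyAndInfo<ClientProtocol>> proxies =
      HAUtil.getProxiesForAllNameNodesInNameservice(
          conf, "mycluster", ClientProtocol.class);
  List<ClientProtocol> namenodes = new ArrayList<ClientProtocol>();
  for (NameNodeProxies.ProxyAndInfo<ClientProtocol> p : proxies) {
    namenodes.add(p.getProxy());
  }
  boolean anyActive = HAUtil.isAtLeastOneActive(namenodes);
-->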
<!-- start class org.apache.hadoop.hdfs.NameNodeProxies -->
<class name="NameNodeProxies" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="NameNodeProxies"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="createProxy" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<param name="xface" type="java.lang.Class"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Creates the namenode proxy with the passed protocol. This will handle
creation of either HA- or non-HA-enabled proxy objects, depending on
whether the provided URI is a configured logical URI.
@param conf the configuration containing the required IPC
properties, client failover configurations, etc.
@param nameNodeUri the URI pointing either to a specific NameNode
or to a logical nameservice.
@param xface the IPC interface which should be created
@return an object containing both the proxy and the associated
delegation token service it corresponds to
@throws IOException if there is an error creating the proxy]]>
</doc>
</method>
<method name="createProxy" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<param name="xface" type="java.lang.Class"/>
<param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Creates the namenode proxy with the passed protocol. This will handle
creation of either HA- or non-HA-enabled proxy objects, depending on
whether the provided URI is a configured logical URI.
@param conf the configuration containing the required IPC
properties, client failover configurations, etc.
@param nameNodeUri the URI pointing either to a specific NameNode
or to a logical nameservice.
@param xface the IPC interface which should be created
@param fallbackToSimpleAuth set to true or false during calls to indicate if
a secure client falls back to simple auth
@return an object containing both the proxy and the associated
delegation token service it corresponds to
@throws IOException if there is an error creating the proxy]]>
</doc>
</method>
<method name="createProxyWithLossyRetryHandler" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="config" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<param name="xface" type="java.lang.Class"/>
<param name="numResponseToDrop" type="int"/>
<param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Generate a dummy namenode proxy instance that utilizes our hacked
{@link LossyRetryInvocationHandler}. Proxy instance generated using this
method will proactively drop RPC responses. Currently this method only
supports HA setups. null will be returned if the given configuration is not
for HA.
@param config the configuration containing the required IPC
properties, client failover configurations, etc.
@param nameNodeUri the URI pointing either to a specific NameNode
or to a logical nameservice.
@param xface the IPC interface which should be created
@param numResponseToDrop The number of responses to drop for each RPC call
@param fallbackToSimpleAuth set to true or false during calls to indicate if
a secure client falls back to simple auth
@return an object containing both the proxy and the associated
delegation token service it corresponds to. Will return null if the
given configuration does not support HA.
@throws IOException if there is an error creating the proxy]]>
</doc>
</method>
<method name="createNonHAProxy" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nnAddr" type="java.net.InetSocketAddress"/>
<param name="xface" type="java.lang.Class"/>
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="withRetries" type="boolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Creates an explicitly non-HA-enabled proxy object. Most of the time you
don't want to use this, and should instead use {@link NameNodeProxies#createProxy}.
@param conf the configuration object
@param nnAddr address of the remote NN to connect to
@param xface the IPC interface which should be created
@param ugi the user who is making the calls on the proxy object
@param withRetries certain interfaces have a non-standard retry policy
@return an object containing both the proxy and the associated
delegation token service it corresponds to
@throws IOException]]>
</doc>
</method>
<method name="createNonHAProxy" return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nnAddr" type="java.net.InetSocketAddress"/>
<param name="xface" type="java.lang.Class"/>
<param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
<param name="withRetries" type="boolean"/>
<param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Creates an explicitly non-HA-enabled proxy object. Most of the time you
don't want to use this, and should instead use {@link NameNodeProxies#createProxy}.
@param conf the configuration object
@param nnAddr address of the remote NN to connect to
@param xface the IPC interface which should be created
@param ugi the user who is making the calls on the proxy object
@param withRetries certain interfaces have a non-standard retry policy
@param fallbackToSimpleAuth set to true or false during this method to
indicate if a secure client falls back to simple auth
@return an object containing both the proxy and the associated
delegation token service it corresponds to
@throws IOException]]>
</doc>
</method>
<method name="getFailoverProxyProviderClass" return="java.lang.Class"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Gets the configured Failover proxy provider's class]]>
</doc>
</method>
<method name="createFailoverProxyProvider" return="org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="conf" type="org.apache.hadoop.conf.Configuration"/>
<param name="nameNodeUri" type="java.net.URI"/>
<param name="xface" type="java.lang.Class"/>
<param name="checkPort" type="boolean"/>
<param name="fallbackToSimpleAuth" type="java.util.concurrent.atomic.AtomicBoolean"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Creates the Failover proxy provider instance]]>
</doc>
</method>
<doc>
<![CDATA[Create proxy objects to communicate with a remote NN. All remote access to an
NN should be funneled through this class. Most of the time you'll want to use
{@link NameNodeProxies#createProxy(Configuration, URI, Class)}, which will
create either an HA- or non-HA-enabled client proxy as appropriate.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.NameNodeProxies -->
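<!-- Usage sketch: the recommended createProxy entry point described above,
assuming ClientProtocol as the IPC interface and fs.defaultFS as the URI.

  URI nnUri = FileSystem.getDefaultUri(conf);
  NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyAndInfo =
      NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class);
  ClientProtocol namenode = proxyAndInfo.getProxy();
  Text tokenService = proxyAndInfo.getDelegationTokenService();
-->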
<!-- start class org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo -->
<class name="NameNodeProxies.ProxyAndInfo" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="NameNodeProxies.ProxyAndInfo" type="PROXYTYPE, org.apache.hadoop.io.Text, java.net.InetSocketAddress"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getProxy" return="PROXYTYPE"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDelegationTokenService" return="org.apache.hadoop.io.Text"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAddress" return="java.net.InetSocketAddress"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Wrapper for a client proxy as well as its associated service ID.
This is simply used as a tuple-like return type for
{@link NameNodeProxies#createProxy} and
{@link NameNodeProxies#createNonHAProxy}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo -->
<!-- start interface org.apache.hadoop.hdfs.RemotePeerFactory -->
<interface name="RemotePeerFactory" abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="newConnectedPeer" return="org.apache.hadoop.hdfs.net.Peer"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="addr" type="java.net.InetSocketAddress"/>
<param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
<param name="datanodeId" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[@param addr The address to connect to.
@param blockToken Token used during optional SASL negotiation
@param datanodeId ID of destination DataNode
@return A new Peer connected to the address.
@throws IOException If there was an error connecting or creating
the remote socket, encrypted stream, etc.]]>
</doc>
</method>
</interface>
<!-- end interface org.apache.hadoop.hdfs.RemotePeerFactory -->
<!-- start class org.apache.hadoop.hdfs.StorageType -->
<class name="StorageType" extends="java.lang.Enum"
abstract="false"
static="false" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.StorageType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.StorageType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
<method name="isTransient" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isMovable" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="asList" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getMovableTypes" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<field name="DEFAULT" type="org.apache.hadoop.hdfs.StorageType"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<field name="EMPTY_ARRAY" type="org.apache.hadoop.hdfs.StorageType[]"
transient="false" volatile="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
</field>
<doc>
<![CDATA[Defines the types of supported storage media. The default storage
medium is assumed to be DISK.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.StorageType -->
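<!-- Usage sketch (illustrative): enumerating the storage media defined by
     StorageType and querying their properties, using only the signatures
     listed above. No cluster is required.

     import java.util.List;
     import org.apache.hadoop.hdfs.StorageType;

     public class StorageTypeDemo {
       public static void main(String[] args) {
         // Report each supported medium and its properties.
         for (StorageType t : StorageType.values()) {
           System.out.println(t + " transient=" + t.isTransient()
               + " movable=" + t.isMovable());
         }
         // Media between which block replicas may be migrated.
         List<StorageType> movable = StorageType.getMovableTypes();
         System.out.println("movable types: " + movable);
       }
     }
-->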
<!-- start class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
<class name="UnknownCipherSuiteException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="UnknownCipherSuiteException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<doc>
<![CDATA[Thrown when an unknown cipher suite is encountered.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
<!-- start class org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException -->
<class name="UnknownCryptoProtocolVersionException" extends="java.io.IOException"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="UnknownCryptoProtocolVersionException"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="UnknownCryptoProtocolVersionException" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
</class>
<!-- end class org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException -->
<doc>
<![CDATA[<p>A distributed implementation of {@link
org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
<p>The most important difference is that unlike GFS, Hadoop DFS files
have strictly one writer at any one time. Bytes are always appended
to the end of the writer's stream. There is no notion of "record appends"
or "mutations" that are then checked or reordered. Writers simply emit
a byte stream. That byte stream is guaranteed to be stored in the
order written.</p>]]>
</doc>
</package>
<package name="org.apache.hadoop.hdfs.client">
<!-- start class org.apache.hadoop.hdfs.client.HdfsAdmin -->
<class name="HdfsAdmin" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HdfsAdmin" type="java.net.URI, org.apache.hadoop.conf.Configuration"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Create a new HdfsAdmin client.
@param uri the unique URI of the HDFS file system to administer
@param conf configuration
@throws IOException in the event the file system could not be created]]>
</doc>
</constructor>
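<!-- Usage sketch (illustrative): constructing an HdfsAdmin client. The
     NameNode URI "hdfs://namenode:8020" is a placeholder; substitute the
     address of your own cluster.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;

     public class HdfsAdminDemo {
       public static void main(String[] args) throws Exception {
         URI nn = URI.create("hdfs://namenode:8020"); // placeholder URI
         // The Configuration picks up core-site.xml/hdfs-site.xml settings.
         HdfsAdmin admin = new HdfsAdmin(nn, new Configuration());
         System.out.println("created HdfsAdmin client for " + nn);
       }
     }
-->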
<method name="setQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="quota" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the namespace quota (count of files, directories, and symlinks)
for a directory.
@param src the path to set the quota for
@param quota the value to set for the quota
@throws IOException in the event of error]]>
</doc>
</method>
<method name="clearQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Clear the namespace quota (count of files, directories, and symlinks)
for a directory.
@param src the path to clear the quota of
@throws IOException in the event of error]]>
</doc>
</method>
<method name="setSpaceQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="spaceQuota" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the disk space quota (size of files) for a directory. Note that
directories and symlinks do not occupy disk space.
@param src the path to set the space quota of
@param spaceQuota the value to set for the space quota
@throws IOException in the event of error]]>
</doc>
</method>
<method name="clearSpaceQuota"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Clear the disk space quota (size of files) for a directory. Note that
directories and symlinks do not occupy disk space.
@param src the path to clear the space quota of
@throws IOException in the event of error]]>
</doc>
</method>
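<!-- Usage sketch (illustrative): setting and clearing quotas with the four
     methods above. The directory /projects/alpha and the NameNode URI are
     hypothetical; note that the space quota is charged for the bytes of all
     replicas.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;

     public class QuotaDemo {
       public static void main(String[] args) throws Exception {
         HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"),
             new Configuration());
         Path dir = new Path("/projects/alpha");
         admin.setQuota(dir, 100000L);       // at most 100k namespace items
         admin.setSpaceQuota(dir, 10L * 1024 * 1024 * 1024); // 10 GB
         // Both quotas can later be removed again:
         admin.clearQuota(dir);
         admin.clearSpaceQuota(dir);
       }
     }
-->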
<method name="allowSnapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Allow snapshot on a directory.
@param path The path of the directory where snapshots will be taken.]]>
</doc>
</method>
<method name="disallowSnapshot"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Disallow snapshot on a directory.
@param path The path of the snapshottable directory.]]>
</doc>
</method>
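<!-- Usage sketch (illustrative): an administrator marks a directory
     snapshottable, after which an authorized client can take snapshots of it
     via FileSystem#createSnapshot. Paths and the NameNode URI are
     hypothetical.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;

     public class SnapshotDemo {
       public static void main(String[] args) throws Exception {
         Configuration conf = new Configuration();
         URI nn = URI.create("hdfs://namenode:8020");
         Path dir = new Path("/projects/alpha");
         new HdfsAdmin(nn, conf).allowSnapshot(dir);   // admin step
         FileSystem fs = FileSystem.get(nn, conf);
         fs.createSnapshot(dir, "s1");                 // client step
       }
     }
-->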
<method name="addCacheDirective" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
<param name="flags" type="java.util.EnumSet"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Add a new CacheDirectiveInfo.
@param info Information about a directive to add.
@param flags {@link CacheFlag}s to use for this operation.
@return the ID of the directive that was created.
@throws IOException if the directive could not be added]]>
</doc>
</method>
<method name="modifyCacheDirective"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
<param name="flags" type="java.util.EnumSet"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Modify a CacheDirective.
@param info Information about the directive to modify. You must set the ID
to indicate which CacheDirective you want to modify.
@param flags {@link CacheFlag}s to use for this operation.
@throws IOException if the directive could not be modified]]>
</doc>
</method>
<method name="removeCacheDirective"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="id" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Remove a CacheDirective.
@param id identifier of the CacheDirectiveInfo to remove
@throws IOException if the directive could not be removed]]>
</doc>
</method>
<method name="listCacheDirectives" return="org.apache.hadoop.fs.RemoteIterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="filter" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[List cache directives. Incrementally fetches results from the server.
@param filter Filter parameters to use when listing the directives, null to
list all directives visible to us.
@return A RemoteIterator which returns CacheDirectiveInfo objects.]]>
</doc>
</method>
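<!-- Usage sketch (illustrative): adding, listing, and removing a cache
     directive. The path /data/hot and the pool name "hot" are hypothetical,
     and the pool must already exist (see addCachePool below).

     import java.net.URI;
     import java.util.EnumSet;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.CacheFlag;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.fs.RemoteIterator;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;
     import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
     import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

     public class CacheDirectiveDemo {
       public static void main(String[] args) throws Exception {
         HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"),
             new Configuration());
         CacheDirectiveInfo info = new CacheDirectiveInfo.Builder()
             .setPath(new Path("/data/hot"))
             .setPool("hot")
             .build();
         long id = admin.addCacheDirective(info,
             EnumSet.noneOf(CacheFlag.class));
         // Filter by pool; a null filter lists all visible directives.
         RemoteIterator<CacheDirectiveEntry> it = admin.listCacheDirectives(
             new CacheDirectiveInfo.Builder().setPool("hot").build());
         while (it.hasNext()) {
           System.out.println(it.next().getInfo());
         }
         admin.removeCacheDirective(id);
       }
     }
-->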
<method name="addCachePool"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Add a cache pool.
@param info
The request to add a cache pool.
@throws IOException
If the request could not be completed.]]>
</doc>
</method>
<method name="modifyCachePool"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Modify an existing cache pool.
@param info
The request to modify a cache pool.
@throws IOException
If the request could not be completed.]]>
</doc>
</method>
<method name="removeCachePool"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="poolName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Remove a cache pool.
@param poolName
Name of the cache pool to remove.
@throws IOException
if the cache pool did not exist, or could not be removed.]]>
</doc>
</method>
<method name="listCachePools" return="org.apache.hadoop.fs.RemoteIterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[List all cache pools.
@return A remote iterator from which you can get CachePoolEntry objects.
Requests will be made as needed.
@throws IOException
If there was an error listing cache pools.]]>
</doc>
</method>
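<!-- Usage sketch (illustrative): creating, listing, and removing a cache
     pool. The pool name "hot" and the NameNode URI are hypothetical.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.RemoteIterator;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;
     import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
     import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

     public class CachePoolDemo {
       public static void main(String[] args) throws Exception {
         HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"),
             new Configuration());
         admin.addCachePool(new CachePoolInfo("hot"));
         RemoteIterator<CachePoolEntry> it = admin.listCachePools();
         while (it.hasNext()) {
           System.out.println(it.next().getInfo().getPoolName());
         }
         admin.removeCachePool("hot");
       }
     }
-->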
<method name="createEncryptionZone"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<param name="keyName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
<exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
<doc>
<![CDATA[Create an encryption zone rooted at an empty existing directory, using the
specified encryption key. An encryption zone has an associated encryption
key used when reading and writing files within the zone.
@param path The path of the root of the encryption zone. Must refer to
an empty, existing directory.
@param keyName Name of key available at the KeyProvider.
@throws IOException if there was a general IO exception
@throws AccessControlException if the caller does not have access to path
@throws FileNotFoundException if the path does not exist]]>
</doc>
</method>
<method name="getEncryptionZoneForPath" return="org.apache.hadoop.hdfs.protocol.EncryptionZone"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="org.apache.hadoop.fs.Path"/>
<exception name="IOException" type="java.io.IOException"/>
<exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
<exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
<doc>
<![CDATA[Get the encryption zone for a given file or directory.
@param path The path to get the encryption zone for.
@return The EncryptionZone, or null if the path is not in an encryption zone.
@throws IOException if there was a general IO exception
@throws AccessControlException if the caller does not have access to path
@throws FileNotFoundException if the path does not exist]]>
</doc>
</method>
<method name="listEncryptionZones" return="org.apache.hadoop.fs.RemoteIterator"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Returns a RemoteIterator which can be used to list the encryption zones
in HDFS. For large numbers of encryption zones, the iterator will fetch
the list of zones in a number of small batches.
<p/>
Since the list is fetched in batches, it does not represent a
consistent snapshot of the entire list of encryption zones.
<p/>
This method can only be called by HDFS superusers.]]>
</doc>
</method>
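<!-- Usage sketch (illustrative): creating an encryption zone and resolving
     the zone of a path. The directory /secure must be empty and existing,
     and the key "key1" is assumed to exist in the configured KeyProvider.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;
     import org.apache.hadoop.hdfs.protocol.EncryptionZone;

     public class EncryptionZoneDemo {
       public static void main(String[] args) throws Exception {
         HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"),
             new Configuration());
         admin.createEncryptionZone(new Path("/secure"), "key1");
         EncryptionZone ez =
             admin.getEncryptionZoneForPath(new Path("/secure/file.txt"));
         if (ez != null) {
           System.out.println("zone root: " + ez.getPath()
               + ", key: " + ez.getKeyName());
         }
       }
     }
-->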
<method name="getInotifyEventStream" return="org.apache.hadoop.hdfs.DFSInotifyEventInputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Exposes a stream of namesystem events. Only events occurring after the
stream is created are available.
See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
for information on stream usage.
See {@link org.apache.hadoop.hdfs.inotify.Event}
for information on the available events.
<p/>
Inotify users may want to tune the following HDFS parameters to
ensure that enough extra HDFS edits are saved to support inotify clients
that fall behind the current state of the namespace while reading events.
The default parameter values should generally be reasonable. If edits are
deleted before their corresponding events can be read, clients will see a
{@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
{@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
It should generally be sufficient to tune these parameters:
dfs.namenode.num.extra.edits.retained
dfs.namenode.max.extra.edits.segments.retained
Parameters that affect the number of created segments and the number of
edits that are considered necessary (i.e. that do not count towards the
dfs.namenode.num.extra.edits.retained quota):
dfs.namenode.checkpoint.period
dfs.namenode.checkpoint.txns
dfs.namenode.num.checkpoints.retained
dfs.ha.log-roll.period
<p/>
It is recommended that local journaling be configured
(dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
so that edit transfers from the shared journal can be avoided.
@throws IOException If there was an error obtaining the stream.]]>
</doc>
</method>
<method name="getInotifyEventStream" return="org.apache.hadoop.hdfs.DFSInotifyEventInputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="lastReadTxid" type="long"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
users who are aware of HDFS edits up to lastReadTxid (e.g. because they
have access to an FSImage inclusive of lastReadTxid) and only want to read
events after this point.]]>
</doc>
</method>
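<!-- Usage sketch (illustrative): polling the inotify event stream. Assumes
     the 2.6 API in which DFSInotifyEventInputStream#take() returns a single
     Event; the NameNode URI is a placeholder. take() blocks until the next
     event arrives and may throw MissingEventsException if the corresponding
     edits have already been purged.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;
     import org.apache.hadoop.hdfs.inotify.Event;

     public class InotifyDemo {
       public static void main(String[] args) throws Exception {
         HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"),
             new Configuration());
         DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
         while (true) {
           Event event = stream.take(); // blocks for the next event
           System.out.println(event.getEventType());
         }
       }
     }
-->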
<method name="setStoragePolicy"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="src" type="org.apache.hadoop.fs.Path"/>
<param name="policyName" type="java.lang.String"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Set the storage policy for the given source path.
@param src The source path referring to either a directory or a file.
@param policyName The name of the storage policy.]]>
</doc>
</method>
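<!-- Usage sketch (illustrative): assigning a built-in storage policy to a
     directory. "COLD" is one of the policies shipped with HDFS; /archive is
     a hypothetical path. Existing replicas are only migrated when the Mover
     tool runs.

     import java.net.URI;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.client.HdfsAdmin;

     public class StoragePolicyDemo {
       public static void main(String[] args) throws Exception {
         HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"),
             new Configuration());
         admin.setStoragePolicy(new Path("/archive"), "COLD");
       }
     }
-->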
<doc>
<![CDATA[The public API for performing administrative functions on HDFS. Those writing
applications against HDFS should prefer this interface to directly accessing
functionality in DistributedFileSystem or DFSClient.
Note that this is distinct from the similarly-named {@link DFSAdmin}, which
is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
commands.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsAdmin -->
<!-- start class org.apache.hadoop.hdfs.client.HdfsDataInputStream -->
<class name="HdfsDataInputStream" extends="org.apache.hadoop.fs.FSDataInputStream"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HdfsDataInputStream" type="org.apache.hadoop.hdfs.DFSInputStream"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="HdfsDataInputStream" type="org.apache.hadoop.crypto.CryptoInputStream"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="getWrappedStream" return="java.io.InputStream"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get a reference to the wrapped input stream. We always want to return the
actual underlying InputStream, even when we're using a CryptoStream, e.g.
in the delegated methods below.
@return the underlying input stream]]>
</doc>
</method>
<method name="getCurrentDatanode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the datanode from which the stream is currently reading.]]>
</doc>
</method>
<method name="getCurrentBlock" return="org.apache.hadoop.hdfs.protocol.ExtendedBlock"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get the block containing the target position.]]>
</doc>
</method>
<method name="getAllBlocks" return="java.util.List"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the collection of blocks that have already been located.]]>
</doc>
</method>
<method name="getVisibleLength" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the visible length of the file. It includes the length of the last
block even if that block is still under construction.
@return The visible length of the file.]]>
</doc>
</method>
<method name="getReadStatistics" return="org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Get statistics about the reads which this DFSInputStream has done.
Note that because HdfsDataInputStream is buffered, these stats may
be higher than you would expect just by adding up the number of
bytes read through HdfsDataInputStream.]]>
</doc>
</method>
<doc>
<![CDATA[The Hdfs implementation of {@link FSDataInputStream}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsDataInputStream -->
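<!-- Usage sketch (illustrative): reading a file and inspecting the per-stream
     read statistics. Assumes fs.defaultFS points at an HDFS cluster, so that
     FileSystem#open returns an HdfsDataInputStream and the cast below holds;
     the file path is hypothetical.

     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.DFSInputStream;
     import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

     public class ReadStatsDemo {
       public static void main(String[] args) throws Exception {
         FileSystem fs = FileSystem.get(new Configuration());
         Path file = new Path("/data/input.txt");
         try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(file)) {
           byte[] buf = new byte[4096];
           while (in.read(buf) != -1) {
             // drain the stream
           }
           DFSInputStream.ReadStatistics stats = in.getReadStatistics();
           System.out.println("total bytes: " + stats.getTotalBytesRead());
           System.out.println("short-circuit bytes: "
               + stats.getTotalShortCircuitBytesRead());
         }
       }
     }
-->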
<!-- start class org.apache.hadoop.hdfs.client.HdfsDataOutputStream -->
<class name="HdfsDataOutputStream" extends="org.apache.hadoop.fs.FSDataOutputStream"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HdfsDataOutputStream" type="org.apache.hadoop.hdfs.DFSOutputStream, org.apache.hadoop.fs.FileSystem.Statistics, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="HdfsDataOutputStream" type="org.apache.hadoop.hdfs.DFSOutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="HdfsDataOutputStream" type="org.apache.hadoop.crypto.CryptoOutputStream, org.apache.hadoop.fs.FileSystem.Statistics, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<constructor name="HdfsDataOutputStream" type="org.apache.hadoop.crypto.CryptoOutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
</constructor>
<method name="getCurrentBlockReplication" return="int"
abstract="false" native="false" synchronized="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Get the actual number of replicas of the current block.
This can be different from the designated replication factor of the file
because the namenode does not maintain replication for the blocks which are
currently being written to. Depending on the configuration, the client may
continue to write to a block even if a few datanodes in the write pipeline
have failed, or the client may add new datanodes to the pipeline after a
datanode has failed.
@return the number of valid replicas of the current block]]>
</doc>
</method>
<method name="hsync"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="syncFlags" type="java.util.EnumSet"/>
<exception name="IOException" type="java.io.IOException"/>
<doc>
<![CDATA[Sync buffered data to DataNodes (flush to disk devices).
@param syncFlags
Indicates the detailed semantics and actions of the hsync.
@throws IOException
@see FSDataOutputStream#hsync()]]>
</doc>
</method>
<doc>
<![CDATA[The Hdfs implementation of {@link FSDataOutputStream}.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsDataOutputStream -->
<!-- start class org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag -->
<class name="HdfsDataOutputStream.SyncFlag" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag -->
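<!-- Usage sketch (illustrative): forcing buffered data to DataNode disks with
     hsync and the UPDATE_LENGTH flag, which also updates the file length seen
     by the NameNode. Assumes fs.defaultFS points at HDFS so that
     FileSystem#create returns an HdfsDataOutputStream; the path is
     hypothetical.

     import java.util.EnumSet;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
     import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

     public class HsyncDemo {
       public static void main(String[] args) throws Exception {
         FileSystem fs = FileSystem.get(new Configuration());
         try (HdfsDataOutputStream out =
                  (HdfsDataOutputStream) fs.create(new Path("/tmp/log"))) {
           out.write("record\n".getBytes("UTF-8"));
           out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
         }
       }
     }
-->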
<!-- start class org.apache.hadoop.hdfs.client.HdfsUtils -->
<class name="HdfsUtils" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="HdfsUtils"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="isHealthy" return="boolean"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="uri" type="java.net.URI"/>
<doc>
<![CDATA[Is the HDFS healthy?
HDFS is considered healthy if it is up and not in safe mode.
@param uri the HDFS URI. Note that the URI path is ignored.
@return true if HDFS is healthy; false otherwise.]]>
</doc>
</method>
<doc>
<![CDATA[The public utility API for HDFS.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.client.HdfsUtils -->
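<!-- Usage sketch (illustrative): a simple liveness probe built on
     HdfsUtils#isHealthy. Only the scheme and authority of the URI matter;
     the address is a placeholder.

     import java.net.URI;
     import org.apache.hadoop.hdfs.client.HdfsUtils;

     public class HealthCheckDemo {
       public static void main(String[] args) {
         boolean ok = HdfsUtils.isHealthy(URI.create("hdfs://namenode:8020/"));
         System.out.println(ok ? "HDFS is up and out of safe mode"
                               : "HDFS is down or in safe mode");
       }
     }
-->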
</package>
<package name="org.apache.hadoop.hdfs.inotify">
<!-- start class org.apache.hadoop.hdfs.inotify.Event -->
<class name="Event" extends="java.lang.Object"
abstract="true"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Event" type="org.apache.hadoop.hdfs.inotify.Event.EventType"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getEventType" return="org.apache.hadoop.hdfs.inotify.Event.EventType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Events sent by the inotify system. Note that no events are necessarily sent
when a file is opened for read (although a MetadataUpdateEvent will be sent
if the atime is updated).]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.AppendEvent -->
<class name="Event.AppendEvent" extends="org.apache.hadoop.hdfs.inotify.Event"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Event.AppendEvent" type="java.lang.String"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Sent when an existing file is opened for append.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.AppendEvent -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.CloseEvent -->
<class name="Event.CloseEvent" extends="org.apache.hadoop.hdfs.inotify.Event"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Event.CloseEvent" type="java.lang.String, long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getFileSize" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The size of the closed file in bytes. May be -1 if the size is not
available (e.g. in the case of a close generated by a concat operation).]]>
</doc>
</method>
<method name="getTimestamp" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The time when this event occurred, in milliseconds since the epoch.]]>
</doc>
</method>
<doc>
<![CDATA[Sent when a file is closed after append or create.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.CloseEvent -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.CreateEvent -->
<class name="Event.CreateEvent" extends="org.apache.hadoop.hdfs.inotify.Event"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getiNodeType" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.INodeType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getCtime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Creation time of the file, directory, or symlink.]]>
</doc>
</method>
<method name="getReplication" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Replication is zero if the CreateEvent iNodeType is directory or symlink.]]>
</doc>
</method>
<method name="getOwnerName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getGroupName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPerms" return="org.apache.hadoop.fs.permission.FsPermission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getSymlinkTarget" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Symlink target is null if the CreateEvent iNodeType is not symlink.]]>
</doc>
</method>
<method name="getOverwrite" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Sent when a new file is created (including overwrite).]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.CreateEvent -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder -->
<class name="Event.CreateEvent.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Event.CreateEvent.Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="iNodeType" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.INodeType"/>
</method>
<method name="path" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
</method>
<method name="ctime" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ctime" type="long"/>
</method>
<method name="replication" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="replication" type="int"/>
</method>
<method name="ownerName" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ownerName" type="java.lang.String"/>
</method>
<method name="groupName" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="groupName" type="java.lang.String"/>
</method>
<method name="perms" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="perms" type="org.apache.hadoop.fs.permission.FsPermission"/>
</method>
<method name="symlinkTarget" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="symlinkTarget" type="java.lang.String"/>
</method>
<method name="overwrite" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="overwrite" type="boolean"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.CreateEvent.Builder -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.CreateEvent.INodeType -->
<class name="Event.CreateEvent.INodeType" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.INodeType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.inotify.Event.CreateEvent.INodeType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.CreateEvent.INodeType -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.EventType -->
<class name="Event.EventType" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.inotify.Event.EventType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.inotify.Event.EventType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.EventType -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent -->
<class name="Event.MetadataUpdateEvent" extends="org.apache.hadoop.hdfs.inotify.Event"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getMetadataType" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.MetadataType"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getMtime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAtime" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getReplication" return="int"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getOwnerName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getGroupName" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getPerms" return="org.apache.hadoop.fs.permission.FsPermission"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getAcls" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The full set of ACLs currently associated with this file or directory.
May be null if all ACLs were removed.]]>
</doc>
</method>
<method name="getxAttrs" return="java.util.List"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="isxAttrsRemoved" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[Whether the xAttrs returned by getxAttrs() were removed (as opposed to
added).]]>
</doc>
</method>
<doc>
<![CDATA[Sent when there is an update to a directory or file (none of the metadata
tracked here applies to symlinks) that is not associated with another
inotify event. The tracked metadata includes atime/mtime, replication,
owner/group, permissions, ACLs, and XAttributes. Fields not relevant to the
metadataType of the MetadataUpdateEvent will be null or will have their default
values.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder -->
<class name="Event.MetadataUpdateEvent.Builder" extends="java.lang.Object"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Event.MetadataUpdateEvent.Builder"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="path" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="path" type="java.lang.String"/>
</method>
<method name="metadataType" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="type" type="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.MetadataType"/>
</method>
<method name="mtime" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="mtime" type="long"/>
</method>
<method name="atime" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="atime" type="long"/>
</method>
<method name="replication" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="replication" type="int"/>
</method>
<method name="ownerName" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="ownerName" type="java.lang.String"/>
</method>
<method name="groupName" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="groupName" type="java.lang.String"/>
</method>
<method name="perms" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="perms" type="org.apache.hadoop.fs.permission.FsPermission"/>
</method>
<method name="acls" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="acls" type="java.util.List"/>
</method>
<method name="xAttrs" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="xAttrs" type="java.util.List"/>
</method>
<method name="xAttrsRemoved" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="xAttrsRemoved" type="boolean"/>
</method>
<method name="build" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.Builder -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.MetadataType -->
<class name="Event.MetadataUpdateEvent.MetadataType" extends="java.lang.Enum"
abstract="false"
static="true" final="true" visibility="public"
deprecated="not deprecated">
<method name="values" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.MetadataType[]"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="valueOf" return="org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.MetadataType"
abstract="false" native="false" synchronized="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<param name="name" type="java.lang.String"/>
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.MetadataUpdateEvent.MetadataType -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.RenameEvent -->
<class name="Event.RenameEvent" extends="org.apache.hadoop.hdfs.inotify.Event"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Event.RenameEvent" type="java.lang.String, java.lang.String, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getSrcPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getDstPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getTimestamp" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The time when this event occurred, in milliseconds since the epoch.]]>
</doc>
</method>
<doc>
<![CDATA[Sent when a file, directory, or symlink is renamed.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.RenameEvent -->
<!-- start class org.apache.hadoop.hdfs.inotify.Event.UnlinkEvent -->
<class name="Event.UnlinkEvent" extends="org.apache.hadoop.hdfs.inotify.Event"
abstract="false"
static="true" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="Event.UnlinkEvent" type="java.lang.String, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getPath" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getTimestamp" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[The time when this event occurred, in milliseconds since the epoch.]]>
</doc>
</method>
<doc>
<![CDATA[Sent when a file, directory, or symlink is deleted.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.Event.UnlinkEvent -->
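<!-- Usage sketch (illustrative): dispatching on the concrete Event subclasses
     described above, e.g. for events obtained from a
     DFSInotifyEventInputStream. Only a few event types are shown; APPEND and
     METADATA would be handled analogously.

     import org.apache.hadoop.hdfs.inotify.Event;

     public class EventDispatch {
       static void handle(Event event) {
         switch (event.getEventType()) {
           case CREATE:
             Event.CreateEvent ce = (Event.CreateEvent) event;
             System.out.println("created " + ce.getPath()
                 + " type=" + ce.getiNodeType());
             break;
           case CLOSE:
             Event.CloseEvent cl = (Event.CloseEvent) event;
             System.out.println("closed " + cl.getPath()
                 + " size=" + cl.getFileSize());
             break;
           case RENAME:
             Event.RenameEvent re = (Event.RenameEvent) event;
             System.out.println(re.getSrcPath() + " renamed to "
                 + re.getDstPath());
             break;
           case UNLINK:
             System.out.println("deleted "
                 + ((Event.UnlinkEvent) event).getPath());
             break;
           default:
             break;
         }
       }
     }
-->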
<!-- start class org.apache.hadoop.hdfs.inotify.MissingEventsException -->
<class name="MissingEventsException" extends="java.lang.Exception"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="MissingEventsException"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<constructor name="MissingEventsException" type="long, long"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getExpectedTxid" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getActualTxid" return="long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="toString" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
</class>
<!-- end class org.apache.hadoop.hdfs.inotify.MissingEventsException -->
</package>
<package name="org.apache.hadoop.hdfs.net">
</package>
<package name="org.apache.hadoop.hdfs.protocol">
<!-- start class org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry -->
<class name="CacheDirectiveEntry" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<constructor name="CacheDirectiveEntry" type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo, org.apache.hadoop.hdfs.protocol.CacheDirectiveStats"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</constructor>
<method name="getInfo" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<method name="getStats" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveStats"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
</method>
<doc>
<![CDATA[Describes a path-based cache directive entry.]]>
</doc>
</class>
<!-- end class org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry -->
<!-- start class org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo -->
<class name="CacheDirectiveInfo" extends="java.lang.Object"
abstract="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<method name="getId" return="java.lang.Long"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The ID of this directive.]]>
</doc>
</method>
<method name="getPath" return="org.apache.hadoop.fs.Path"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The path used in this request.]]>
</doc>
</method>
<method name="getReplication" return="java.lang.Short"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The number of times the block should be cached.]]>
</doc>
</method>
<method name="getPool" return="java.lang.String"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return The pool used in this request.]]>
</doc>
</method>
<method name="getExpiration" return="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<doc>
<![CDATA[@return When this directive expires.]]>
</doc>
</method>
<method name="equals" return="boolean"
abstract="false" native="false" synchronized="false"
static="false" final="false" visibility="public"
deprecated="not deprecated">
<param name="o" type="java.lang.Object"/>
</method>
<method name="hashCode" return="int"
abstract="false" native="false" synchronized="false"
static=