HADOOP-18305. Release Hadoop 3.3.4: upstream changelog and jdiff files

Add the r3.3.4 changelog, release notes and jdiff xml files.
diff --git a/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_3.3.4.xml b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_3.3.4.xml
new file mode 100644
index 0000000..62a0e09
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_3.3.4.xml
@@ -0,0 +1,39037 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Fri Jul 29 13:58:49 GMT 2022 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop Common 3.3.4"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-common-project/hadoop-common/target/hadoop-annotations.jar:/build/source/hadoop-common-project/hadoop-common/target/jdiff.jar -verbose -classpath /build/source/hadoop-common-project/hadoop-common/target/classes:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-protobuf_3_7/1.1.1/hadoop-shaded-protobuf_3_7-1.1.1.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.3.4.jar:/usr/lib/jvm/java-8-openjdk-amd64/lib/tools.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-guava/1.1.1/hadoop-shaded-guava-1.1.1.jar:/maven/com/google/guava/guava/27.0-jre/guava-27.0-jre.jar:/maven/com/google/guava/failureaccess/1.0/failureaccess-1.0.jar:/maven/com/google/guava/listenablefuture/9999.0-empty-to-avoid-conflict-with-guava/listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar:/maven/org/checkerframework/checker-qual/2.5.2/checker-qual-2.5.2.jar:/maven/com/google/j2objc/j2objc-annotations/1.1/j2objc-annotations-1.1.jar:/maven/org/codehaus/mojo/animal-sniffer-annotations/1.17/animal-sniffer-annotations-1.17.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/org/apache/httpcomponents/httpclient/4.5.13/httpclient-4.5.13.jar:/maven/org/apache/httpcomponents/httpcore/4.4.13/httpcore-4.4.13.jar:/maven/commons-codec/commons-codec/1.15/commons-codec-1.15.jar:/maven/commons-io/commons-io/2.8.0/commons-io-2.8.0.jar:/maven/commons-net/commons-net/3.6/commons-net-3.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/org/eclipse/jetty/jetty-server/9.4.43.v20210629/jetty-server-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-http/9.4.43.v20210629/jetty-http-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/j
etty-io/9.4.43.v20210629/jetty-io-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-util/9.4.43.v20210629/jetty-util-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-servlet/9.4.43.v20210629/jetty-servlet-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-security/9.4.43.v20210629/jetty-security-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-webapp/9.4.43.v20210629/jetty-webapp-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-xml/9.4.43.v20210629/jetty-xml-9.4.43.v20210629.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/ch/qos/reload4j/reload4j/1.2.22/reload4j-1.2.22.jar:/maven/commons-beanutils/commons-beanutils/1.9.4/commons-beanutils-1.9.4.jar:/maven/org/apache/commons/commons-configuration2/2.1.1/commons-configuration2-2.1.1.jar:/maven/org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar:/maven/org/apache/commons/commons-text/1.4/commons-text-1.4.jar:/maven/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar:/maven/org/slf4j/slf4j-reload4j/1.7.36/slf4j-reload4j-1.7.36.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/com/google/re2j/re2j/1.1/re2j-1.1.jar
:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/com/google/code/gson/gson/2.8.9/gson-2.8.9.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.3.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/9.8.1/nimbus-jose-jwt-9.8.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/2.4.7/json-smart-2.4.7.jar:/maven/net/minidev/accessors-smart/2.4.7/accessors-smart-2.4.7.jar:/maven/org/ow2/asm/asm/5.0.4/asm-5.0.4.jar:/maven/org/apache/curator/curator-framework/4.2.0/curator-framework-4.2.0.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.1/kerb-simplekdc-1.0.1.jar:/maven/org/apache/kerby/kerb-client/1.0.1/kerb-client-1.0.1.jar:/maven/org/apache/kerby/kerby-config/1.0.1/kerby-config-1.0.1.jar:/maven/org/apache/kerby/kerb-common/1.0.1/kerb-common-1.0.1.jar:/maven/org/apache/kerby/kerb-crypto/1.0.1/kerb-crypto-1.0.1.jar:/maven/org/apache/kerby/kerb-util/1.0.1/kerb-util-1.0.1.jar:/maven/org/apache/kerby/token-provider/1.0.1/token-provider-1.0.1.jar:/maven/org/apache/kerby/kerb-admin/1.0.1/kerb-admin-1.0.1.jar:/maven/org/apache/kerby/kerb-server/1.0.1/kerb-server-1.0.1.jar:/maven/org/apache/kerby/kerb-identity/1.0.1/kerb-identity-1.0.1.jar:/maven/org/apache/kerby/kerby-xdr/1.0.1/kerby-xdr-1.0.1.jar:/maven/com/jcraft/jsch/0.1.55/jsch-0.1.55.jar:/maven/org/apache/curator/curator-client/4.2.0/curator-client-4.2.0.jar:/maven/org/apache/curator/curator-recipes/4.2.0/curator-recipes-4.2.0.jar:/maven/com/google/code/findbugs/jsr305/3.0.2/jsr305-3.0.2.jar:/maven/org/apache/zookeeper/zookeeper/3.5.6/zookeeper-3.5.6.jar:/maven/org/apache/zookeeper/zookeeper-jute/3.5.6/zookeeper-jute-3.5.6.jar:/maven/org/apache/yetus/audience-annotations/0.5.0/audience-annotations-0.5.0.jar:/maven/org/apache/commons/commons-compress/1.21/commons-compress-1.21.jar:/maven/org/apache/kerby/kerb-core/1.0.1/kerb-core-1.0.1.jar:/maven/org/apache/kerby/kerby-pkix/1.0.1/kerby-pkix-1.0.1.jar:/maven/org/apache/ke
rby/kerby-asn1/1.0.1/kerby-asn1-1.0.1.jar:/maven/org/apache/kerby/kerby-util/1.0.1/kerby-util-1.0.1.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.12.7/jackson-databind-2.12.7.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.12.7/jackson-annotations-2.12.7.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.12.7/jackson-core-2.12.7.jar:/maven/org/codehaus/woodstox/stax2-api/4.2.1/stax2-api-4.2.1.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.3.0/woodstox-core-5.3.0.jar:/maven/dnsjava/dnsjava/2.1.7/dnsjava-2.1.7.jar:/maven/org/wildfly/openssl/wildfly-openssl-java/1.0.7.Final/wildfly-openssl-java-1.0.7.Final.jar:/maven/org/xerial/snappy/snappy-java/1.1.8.2/snappy-java-1.1.8.2.jar:/maven/org/lz4/lz4-java/1.7.1/lz4-java-1.7.1.jar:/maven/xerces/xercesImpl/2.12.2/xercesImpl-2.12.2.jar:/maven/xml-apis/xml-apis/1.4.01/xml-apis-1.4.01.jar -sourcepath /build/source/hadoop-common-project/hadoop-common/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-common-project/hadoop-common/target/hadoop-annotations.jar:/build/source/hadoop-common-project/hadoop-common/target/jdiff.jar -apidir /build/source/hadoop-common-project/hadoop-common/target/site/jdiff/xml -apiname Apache Hadoop Common 3.3.4 -->
+<package name="org.apache.hadoop">
+  <!-- start class org.apache.hadoop.HadoopIllegalArgumentException -->
+  <class name="HadoopIllegalArgumentException" extends="java.lang.IllegalArgumentException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="HadoopIllegalArgumentException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs exception with the specified detail message. 
+ @param message detailed message.]]>
+      </doc>
+    </constructor>
+    <doc>
+    <![CDATA[Indicates that a method has been passed illegal or invalid argument. This
+ exception is thrown instead of IllegalArgumentException to differentiate the
+ exception thrown in Hadoop implementation from the one thrown in JDK.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.HadoopIllegalArgumentException -->
+</package>
+<package name="org.apache.hadoop.conf">
+  <!-- start interface org.apache.hadoop.conf.Configurable -->
+  <interface name="Configurable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="setConf"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Set the configuration to be used by this object.
+ @param conf configuration to be used]]>
+      </doc>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the configuration used by this object.
+ @return Configuration]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Something that may be configured with a {@link Configuration}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.conf.Configurable -->
+  <!-- start class org.apache.hadoop.conf.Configuration -->
+  <class name="Configuration" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Iterable"/>
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A new configuration.]]>
+      </doc>
+    </constructor>
+    <constructor name="Configuration" type="boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A new configuration where the behavior of reading from the default 
+ resources can be turned off.
+ 
+ If the parameter {@code loadDefaults} is false, the new instance
+ will not load resources from the default files. 
+ @param loadDefaults specifies whether to load from the default files]]>
+      </doc>
+    </constructor>
+    <constructor name="Configuration" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A new configuration with the same settings cloned from another.
+ 
+ @param other the configuration from which to clone settings.]]>
+      </doc>
+    </constructor>
+    <method name="addDeprecations"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="deltas" type="org.apache.hadoop.conf.Configuration.DeprecationDelta[]"/>
+      <doc>
+      <![CDATA[Adds a set of deprecated keys to the global deprecations.
+
+ This method is lockless.  It works by means of creating a new
+ DeprecationContext based on the old one, and then atomically swapping in
+ the new context.  If someone else updated the context in between us reading
+ the old context and swapping in the new one, we try again until we win the
+ race.
+
+ @param deltas   The deprecations to add.]]>
+      </doc>
+    </method>
+    <method name="addDeprecation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="use {@link #addDeprecation(String key, String newKey,
+      String customMessage)} instead">
+      <param name="key" type="java.lang.String"/>
+      <param name="newKeys" type="java.lang.String[]"/>
+      <param name="customMessage" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Adds the deprecated key to the global deprecation map.
+ It does not override any existing entries in the deprecation map.
+ This is to be used only by the developers in order to add deprecation of
+ keys, and attempts to call this method after loading resources once,
+ would lead to <tt>UnsupportedOperationException</tt>
+ 
+ If a key is deprecated in favor of multiple keys, they are all treated as 
+ aliases of each other, and setting any one of them resets all the others 
+ to the new value.
+
+ If you have multiple deprecation entries to add, it is more efficient to
+ use #addDeprecations(DeprecationDelta[] deltas) instead.
+ 
+ @param key to be deprecated
+ @param newKeys list of keys that take up the values of deprecated key
+ @param customMessage depcrication message
+ @deprecated use {@link #addDeprecation(String key, String newKey,
+      String customMessage)} instead]]>
+      </doc>
+    </method>
+    <method name="addDeprecation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="newKey" type="java.lang.String"/>
+      <param name="customMessage" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Adds the deprecated key to the global deprecation map.
+ It does not override any existing entries in the deprecation map.
+ This is to be used only by the developers in order to add deprecation of
+ keys, and attempts to call this method after loading resources once,
+ would lead to <tt>UnsupportedOperationException</tt>
+ 
+ If you have multiple deprecation entries to add, it is more efficient to
+ use #addDeprecations(DeprecationDelta[] deltas) instead.
+
+ @param key to be deprecated
+ @param newKey key that take up the values of deprecated key
+ @param customMessage deprecation message]]>
+      </doc>
+    </method>
+    <method name="addDeprecation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="use {@link #addDeprecation(String key, String newKey)} instead">
+      <param name="key" type="java.lang.String"/>
+      <param name="newKeys" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Adds the deprecated key to the global deprecation map when no custom
+ message is provided.
+ It does not override any existing entries in the deprecation map.
+ This is to be used only by the developers in order to add deprecation of
+ keys, and attempts to call this method after loading resources once,
+ would lead to <tt>UnsupportedOperationException</tt>
+ 
+ If a key is deprecated in favor of multiple keys, they are all treated as 
+ aliases of each other, and setting any one of them resets all the others 
+ to the new value.
+ 
+ If you have multiple deprecation entries to add, it is more efficient to
+ use #addDeprecations(DeprecationDelta[] deltas) instead.
+
+ @param key Key that is to be deprecated
+ @param newKeys list of keys that take up the values of deprecated key
+ @deprecated use {@link #addDeprecation(String key, String newKey)} instead]]>
+      </doc>
+    </method>
+    <method name="addDeprecation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="newKey" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Adds the deprecated key to the global deprecation map when no custom
+ message is provided.
+ It does not override any existing entries in the deprecation map.
+ This is to be used only by the developers in order to add deprecation of
+ keys, and attempts to call this method after loading resources once,
+ would lead to <tt>UnsupportedOperationException</tt>
+ 
+ If you have multiple deprecation entries to add, it is more efficient to
+ use #addDeprecations(DeprecationDelta[] deltas) instead.
+
+ @param key Key that is to be deprecated
+ @param newKey key that takes up the value of deprecated key]]>
+      </doc>
+    </method>
+    <method name="isDeprecated" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[checks whether the given <code>key</code> is deprecated.
+ 
+ @param key the parameter which is to be checked for deprecation
+ @return <code>true</code> if the key is deprecated and 
+         <code>false</code> otherwise.]]>
+      </doc>
+    </method>
+    <method name="setDeprecatedProperties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Sets all deprecated properties that are not currently set but have a
+ corresponding new property that is set. Useful for iterating the
+ properties when all deprecated properties for currently set properties
+ need to be present.]]>
+      </doc>
+    </method>
+    <method name="reloadExistingConfigurations"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reload existing configuration instances.]]>
+      </doc>
+    </method>
+    <method name="addDefaultResource"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a default resource. Resources are loaded in the order of the resources 
+ added.
+ @param name file name. File should be present in the classpath.]]>
+      </doc>
+    </method>
+    <method name="setRestrictSystemPropertiesDefault"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+    </method>
+    <method name="setRestrictSystemProperties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ @param name resource to be added, the classpath is examined for a file 
+             with that name.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ @param url url of the resource to be added, the local filesystem is 
+            examined directly to find the resource, without referring to 
+            the classpath.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ @param file file-path of resource to be added, the local filesystem is
+             examined directly to find the resource, without referring to 
+             the classpath.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ WARNING: The contents of the InputStream will be cached, by this method. 
+ So use this sparingly because it does increase the memory consumption.
+ 
+ @param in InputStream to deserialize the object from. In will be read from
+ when a get or set is called next.  After it is read the stream will be
+ closed.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a configuration resource. 
+ 
+ The properties of this resource will override properties of previously 
+ added resources, unless they were marked <a href="#Final">final</a>. 
+ 
+ @param in InputStream to deserialize the object from.
+ @param name the name of the resource because InputStream.toString is not
+ very descriptive some times.]]>
+      </doc>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="restrictedParser" type="boolean"/>
+    </method>
+    <method name="addResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Add a configuration resource.
+
+ The properties of this resource will override properties of previously
+ added resources, unless they were marked <a href="#Final">final</a>.
+
+ @param conf Configuration object from which to load properties]]>
+      </doc>
+    </method>
+    <method name="reloadConfiguration"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reload configuration from previously added resources.
+
+ This method will clear all the configuration read from the added 
+ resources, and final parameters. This will make the resources to 
+ be read again before accessing the values. Values that are added
+ via set methods will overlay values read from the resources.]]>
+      </doc>
+    </method>
+    <method name="get" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property, <code>null</code> if
+ no such property exists. If the key is deprecated, it returns the value of
+ the first key which replaces the deprecated key and is not null.
+ 
+ Values are processed for <a href="#VariableExpansion">variable expansion</a> 
+ before being returned.
+
+ As a side effect get loads the properties from the sources if called for
+ the first time as a lazy init.
+ 
+ @param name the property name, will be trimmed before get value.
+ @return the value of the <code>name</code> or its replacing property, 
+         or null if no such property exists.]]>
+      </doc>
+    </method>
+    <method name="setAllowNullValueProperties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+      <doc>
+      <![CDATA[Set Configuration to allow keys without values during setup.  Intended
+ for use during testing.
+
+ @param val If true, will allow Configuration to store keys without values]]>
+      </doc>
+    </method>
+    <method name="setRestrictSystemProps"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+    </method>
+    <method name="onlyKeyExists" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Return existence of the <code>name</code> property, but only for
+ names which have no valid value, usually non-existent or commented
+ out in XML.
+
+ @param name the property name
+ @return true if the property <code>name</code> exists without value]]>
+      </doc>
+    </method>
+    <method name="getTrimmed" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a trimmed <code>String</code>, 
+ <code>null</code> if no such property exists. 
+ If the key is deprecated, it returns the value of
+ the first key which replaces the deprecated key and is not null
+ 
+ Values are processed for <a href="#VariableExpansion">variable expansion</a> 
+ before being returned. 
+ 
+ @param name the property name.
+ @return the value of the <code>name</code> or its replacing property, 
+         or null if no such property exists.]]>
+      </doc>
+    </method>
+    <method name="getTrimmed" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a trimmed <code>String</code>, 
+ <code>defaultValue</code> if no such property exists. 
+ See @{Configuration#getTrimmed} for more details.
+ 
+ @param name          the property name.
+ @param defaultValue  the property default value.
+ @return              the value of the <code>name</code> or defaultValue
+                      if it is not set.]]>
+      </doc>
+    </method>
+    <method name="getRaw" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property, without doing
+ <a href="#VariableExpansion">variable expansion</a>.If the key is 
+ deprecated, it returns the value of the first key which replaces 
+ the deprecated key and is not null.
+ 
+ @param name the property name.
+ @return the value of the <code>name</code> property or 
+         its replacing property and null if no such property exists.]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <code>value</code> of the <code>name</code> property. If 
+ <code>name</code> is deprecated or there is a deprecated name associated to it,
+ it sets the value to both names. Name will be trimmed before put into
+ configuration.
+ 
+ @param name property name.
+ @param value property value.]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <param name="source" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <code>value</code> of the <code>name</code> property. If 
+ <code>name</code> is deprecated, it also sets the <code>value</code> to
+ the keys that replace the deprecated key. Name will be trimmed before put
+ into configuration.
+
+ @param name property name.
+ @param value property value.
+ @param source the place that this configuration value came from 
+ (For debugging).
+ @throws IllegalArgumentException when the value or name is null.]]>
+      </doc>
+    </method>
+    <method name="unset"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Unset a previously set property.
+ @param name the property name]]>
+      </doc>
+    </method>
+    <method name="setIfUnset"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets a property if it is currently unset.
+ @param name the property name
+ @param value the new value]]>
+      </doc>
+    </method>
+    <method name="get" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code>. If the key is deprecated,
+ it returns the value of the first key which replaces the deprecated key
+ and is not null.
+ If no such property exists,
+ then <code>defaultValue</code> is returned.
+ 
+ @param name property name, will be trimmed before get value.
+ @param defaultValue default value.
+ @return property value, or <code>defaultValue</code> if the property 
+         doesn't exist.]]>
+      </doc>
+    </method>
+    <method name="getInt" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="int"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as an <code>int</code>.
+   
+ If no such property exists, the provided default value is returned,
+ or if the specified value is not a valid <code>int</code>,
+ then an error is thrown.
+ 
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as an <code>int</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getInts" return="int[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a set of comma-delimited
+ <code>int</code> values.
+ 
+ If no such property exists, an empty array is returned.
+ 
+ @param name property name
+ @return property value interpreted as an array of comma-delimited
+         <code>int</code> values]]>
+      </doc>
+    </method>
+    <method name="setInt"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="int"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to an <code>int</code>.
+ 
+ @param name property name.
+ @param value <code>int</code> value of the property.]]>
+      </doc>
+    </method>
+    <method name="getLong" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="long"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>long</code>.  
+ If no such property exists, the provided default value is returned,
+ or if the specified value is not a valid <code>long</code>,
+ then an error is thrown.
+ 
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as a <code>long</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getLongBytes" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="long"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>long</code> or
+ human readable format. If no such property exists, the provided default
+ value is returned, or if the specified value is not a valid
+ <code>long</code> or human readable format, then an error is thrown. You
+ can use the following suffix (case insensitive): k(kilo), m(mega), g(giga),
+ t(tera), p(peta), e(exa)
+
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as a <code>long</code>,
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="setLong"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to a <code>long</code>.
+ 
+ @param name property name.
+ @param value <code>long</code> value of the property.]]>
+      </doc>
+    </method>
+    <method name="getFloat" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="float"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>float</code>.  
+ If no such property exists, the provided default value is returned,
+ or if the specified value is not a valid <code>float</code>,
+ then an error is thrown.
+
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as a <code>float</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="setFloat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="float"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to a <code>float</code>.
+ 
+ @param name property name.
+ @param value property value.]]>
+      </doc>
+    </method>
+    <method name="getDouble" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="double"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>double</code>.  
+ If no such property exists, the provided default value is returned,
+ or if the specified value is not a valid <code>double</code>,
+ then an error is thrown.
+
+ @param name property name.
+ @param defaultValue default value.
+ @throws NumberFormatException when the value is invalid
+ @return property value as a <code>double</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="setDouble"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="double"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to a <code>double</code>.
+ 
+ @param name property name.
+ @param value property value.]]>
+      </doc>
+    </method>
+    <method name="getBoolean" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="boolean"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>boolean</code>.  
+ If no such property is specified, or if the specified value is not a valid
+ <code>boolean</code>, then <code>defaultValue</code> is returned.
+ 
+ @param name property name.
+ @param defaultValue default value.
+ @return property value as a <code>boolean</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="setBoolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to a <code>boolean</code>.
+ 
+ @param name property name.
+ @param value <code>boolean</code> value of the property.]]>
+      </doc>
+    </method>
+    <method name="setBooleanIfUnset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set the given property, if it is currently unset.
+ @param name property name
+ @param value new value]]>
+      </doc>
+    </method>
+    <method name="setEnum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="T"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to the given type. This
+ is equivalent to <code>set(&lt;name&gt;, value.toString())</code>.
+ @param name property name
+ @param value new value
+ @param <T> enumeration type]]>
+      </doc>
+    </method>
+    <method name="getEnum" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="T"/>
+      <doc>
+      <![CDATA[Return value matching this enumerated type.
+ Note that the returned value is trimmed by this method.
+ @param name Property name
+ @param defaultValue Value returned if no mapping exists
+ @param <T> enumeration type
+ @throws IllegalArgumentException If mapping is illegal for the type
+ provided
+ @return enumeration type]]>
+      </doc>
+    </method>
+    <method name="setTimeDuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="long"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+      <doc>
+      <![CDATA[Set the value of <code>name</code> to the given time duration. This
+ is equivalent to <code>set(&lt;name&gt;, value + &lt;time suffix&gt;)</code>.
+ @param name Property name
+ @param value Time duration
+ @param unit Unit of time]]>
+      </doc>
+    </method>
+    <method name="getTimeDuration" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="long"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+      <doc>
+      <![CDATA[Return time duration in the given time unit. Valid units are encoded in
+ properties as suffixes: nanoseconds (ns), microseconds (us), milliseconds
+ (ms), seconds (s), minutes (m), hours (h), and days (d).
+
+ @param name Property name
+ @param defaultValue Value returned if no mapping exists.
+ @param unit Unit to convert the stored property, if it exists.
+ @throws NumberFormatException If the property stripped of its unit is not
+         a number
+ @return time duration in given time unit]]>
+      </doc>
+    </method>
+    <method name="getTimeDuration" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+    </method>
+    <method name="getTimeDuration" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="long"/>
+      <param name="defaultUnit" type="java.util.concurrent.TimeUnit"/>
+      <param name="returnUnit" type="java.util.concurrent.TimeUnit"/>
+      <doc>
+      <![CDATA[Return time duration in the given time unit. Valid units are encoded in
+ properties as suffixes: nanoseconds (ns), microseconds (us), milliseconds
+ (ms), seconds (s), minutes (m), hours (h), and days (d). If no unit is
+ provided, the default unit is applied.
+
+ @param name Property name
+ @param defaultValue Value returned if no mapping exists.
+ @param defaultUnit Default time unit if no valid suffix is provided.
+ @param returnUnit The unit used for the returned value.
+ @throws NumberFormatException If the property stripped of its unit is not
+         a number
+ @return time duration in given time unit]]>
+      </doc>
+    </method>
+    <method name="getTimeDuration" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String"/>
+      <param name="defaultUnit" type="java.util.concurrent.TimeUnit"/>
+      <param name="returnUnit" type="java.util.concurrent.TimeUnit"/>
+    </method>
+    <method name="getTimeDurationHelper" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="vStr" type="java.lang.String"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+      <doc>
+      <![CDATA[Return time duration in the given time unit. Valid units are encoded in
+ properties as suffixes: nanoseconds (ns), microseconds (us), milliseconds
+ (ms), seconds (s), minutes (m), hours (h), and days (d).
+
+ @param name Property name
+ @param vStr The string value with time unit suffix to be converted.
+ @param unit Unit to convert the stored property, if it exists.]]>
+      </doc>
+    </method>
+    <method name="getTimeDurations" return="long[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+    </method>
+    <method name="getStorageSize" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String"/>
+      <param name="targetUnit" type="org.apache.hadoop.conf.StorageUnit"/>
+      <doc>
+      <![CDATA[Gets the Storage Size from the config, or returns the defaultValue. The
+ unit of return value is specified in target unit.
+
+ @param name - Key Name
+ @param defaultValue - Default Value -- e.g. 100MB
+ @param targetUnit - The units that we want result to be in.
+ @return double -- formatted in target Units]]>
+      </doc>
+    </method>
+    <method name="getStorageSize" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="double"/>
+      <param name="targetUnit" type="org.apache.hadoop.conf.StorageUnit"/>
+      <doc>
+      <![CDATA[Gets storage size from a config file.
+
+ @param name - Key to read.
+ @param defaultValue - The default value to return in case the key is
+ not present.
+ @param targetUnit - The Storage unit that should be used
+ for the return value.
+ @return - double value in the Storage Unit specified.]]>
+      </doc>
+    </method>
+    <method name="setStorageSize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="double"/>
+      <param name="unit" type="org.apache.hadoop.conf.StorageUnit"/>
+      <doc>
+      <![CDATA[Sets Storage Size for the specified key.
+
+ @param name - Key to set.
+ @param value - The numeric value to set.
+ @param unit - Storage Unit to be used.]]>
+      </doc>
+    </method>
+    <method name="getPattern" return="java.util.regex.Pattern"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.util.regex.Pattern"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>Pattern</code>.
+ If no such property is specified, or if the specified value is not a valid
+ <code>Pattern</code>, then <code>defaultValue</code> is returned.
+ Note that the returned value is NOT trimmed by this method.
+
+ @param name property name
+ @param defaultValue default value
+ @return property value as a compiled Pattern, or defaultValue]]>
+      </doc>
+    </method>
+    <method name="setPattern"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="pattern" type="java.util.regex.Pattern"/>
+      <doc>
+      <![CDATA[Set the given property to <code>Pattern</code>.
+ If the pattern is passed as null, sets the empty pattern which results in
+ further calls to getPattern(...) returning the default value.
+
+ @param name property name
+ @param pattern new value]]>
+      </doc>
+    </method>
+    <method name="getPropertySources" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Gets information about why a property was set.  Typically this is the 
+ path to the resource objects (file, URL, etc.) the property came from, but
+ it can also indicate that it was set programmatically, or because of the
+ command line.
+
+ @param name - The property name to get the source of.
+ @return null - If the property or its source wasn't found. Otherwise, 
+ returns a list of the sources of the resource.  The older sources are
+ the first ones in the list.  So for example if a configuration is set from
+ the command line, and then written out to a file that is read back in the
+ first entry would indicate that it was set from the command line, while
+ the second one would indicate the file that the new configuration was read
+ in from.]]>
+      </doc>
+    </method>
+    <method name="getRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Parse the given attribute as a set of integer ranges
+ @param name the attribute name
+ @param defaultValue the default value if it is not set
+ @return a new set of ranges from the configured value]]>
+      </doc>
+    </method>
+    <method name="getStringCollection" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ a collection of <code>String</code>s.  
+ If no such property is specified then empty collection is returned.
+ <p>
+ This is an optimized version of {@link #getStrings(String)}
+ 
+ @param name property name.
+ @return property value as a collection of <code>String</code>s.]]>
+      </doc>
+    </method>
+    <method name="getStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ an array of <code>String</code>s.  
+ If no such property is specified then <code>null</code> is returned.
+ 
+ @param name property name.
+ @return property value as an array of <code>String</code>s, 
+         or <code>null</code>.]]>
+      </doc>
+    </method>
+    <method name="getStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ an array of <code>String</code>s.  
+ If no such property is specified then default value is returned.
+ 
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of <code>String</code>s, 
+         or default value.]]>
+      </doc>
+    </method>
+    <method name="getTrimmedStringCollection" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ a collection of <code>String</code>s, trimmed of the leading and trailing whitespace.  
+ If no such property is specified then empty <code>Collection</code> is returned.
+
+ @param name property name.
+ @return property value as a collection of <code>String</code>s, or empty <code>Collection</code>]]>
+      </doc>
+    </method>
+    <method name="getTrimmedStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ an array of <code>String</code>s, trimmed of the leading and trailing whitespace.
+ If no such property is specified then an empty array is returned.
+ 
+ @param name property name.
+ @return property value as an array of trimmed <code>String</code>s, 
+         or empty array.]]>
+      </doc>
+    </method>
+    <method name="getTrimmedStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Get the comma delimited values of the <code>name</code> property as 
+ an array of <code>String</code>s, trimmed of the leading and trailing whitespace.
+ If no such property is specified then default value is returned.
+ 
+ @param name property name.
+ @param defaultValue The default value
+ @return property value as an array of trimmed <code>String</code>s, 
+         or default value.]]>
+      </doc>
+    </method>
+    <method name="setStrings"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="values" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Set the array of string values for the <code>name</code> property
+ as comma delimited values.  
+ 
+ @param name property name.
+ @param values The values]]>
+      </doc>
+    </method>
+    <method name="getPassword" return="char[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the value for a known password configuration element.
+ In order to enable the elimination of clear text passwords in config,
+ this method attempts to resolve the property name as an alias through
+ the CredentialProvider API and conditionally falls back to config.
+ @param name property name
+ @return password
+ @throws IOException when error in fetching password]]>
+      </doc>
+    </method>
+    <method name="getPasswordFromCredentialProviders" return="char[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Try and resolve the provided element name as a credential provider
+ alias.
+ @param name alias of the provisioned credential
+ @return password or null if not found
+ @throws IOException when error in fetching password]]>
+      </doc>
+    </method>
+    <method name="getPasswordFromConfig" return="char[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Fallback to clear text passwords in configuration.
+ @param name
+ @return clear text password or null]]>
+      </doc>
+    </method>
+    <method name="getSocketAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hostProperty" type="java.lang.String"/>
+      <param name="addressProperty" type="java.lang.String"/>
+      <param name="defaultAddressValue" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+      <doc>
+      <![CDATA[Get the socket address for <code>hostProperty</code> as a
+ <code>InetSocketAddress</code>. If <code>hostProperty</code> is
+ <code>null</code>, <code>addressProperty</code> will be used. This
+ is useful for cases where we want to differentiate between host
+ bind address and address clients should use to establish connection.
+
+ @param hostProperty bind host property name.
+ @param addressProperty address property name.
+ @param defaultAddressValue the default value
+ @param defaultPort the default port
+ @return InetSocketAddress]]>
+      </doc>
+    </method>
+    <method name="getSocketAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultAddress" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+      <doc>
+      <![CDATA[Get the socket address for <code>name</code> property as a
+ <code>InetSocketAddress</code>.
+ @param name property name.
+ @param defaultAddress the default value
+ @param defaultPort the default port
+ @return InetSocketAddress]]>
+      </doc>
+    </method>
+    <method name="setSocketAddr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetSocketAddress"/>
+      <doc>
+      <![CDATA[Set the socket address for the <code>name</code> property as
+ a <code>host:port</code>.]]>
+      </doc>
+    </method>
+    <method name="updateConnectAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hostProperty" type="java.lang.String"/>
+      <param name="addressProperty" type="java.lang.String"/>
+      <param name="defaultAddressValue" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetSocketAddress"/>
+      <doc>
+      <![CDATA[Set the socket address a client can use to connect for the
+ <code>name</code> property as a <code>host:port</code>.  The wildcard
+ address is replaced with the local host's address. If the host and address
+ properties are configured the host component of the address will be combined
+ with the port component of the addr to generate the address.  This is to allow
+ optional control over which host name is used in multi-home bind-host
+ cases where a host can have multiple names
+ @param hostProperty the bind-host configuration name
+ @param addressProperty the service address configuration name
+ @param defaultAddressValue the service default address configuration value
+ @param addr InetSocketAddress of the service listener
+ @return InetSocketAddress for clients to connect]]>
+      </doc>
+    </method>
+    <method name="updateConnectAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetSocketAddress"/>
+      <doc>
+      <![CDATA[Set the socket address a client can use to connect for the
+ <code>name</code> property as a <code>host:port</code>.  The wildcard
+ address is replaced with the local host's address.
+ @param name property name.
+ @param addr InetSocketAddress of a listener to store in the given property
+ @return InetSocketAddress for clients to connect]]>
+      </doc>
+    </method>
+    <method name="getClassByName" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <doc>
+      <![CDATA[Load a class by name.
+ 
+ @param name the class name.
+ @return the class object.
+ @throws ClassNotFoundException if the class is not found.]]>
+      </doc>
+    </method>
+    <method name="getClassByNameOrNull" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Load a class by name, returning null rather than throwing an exception
+ if it couldn't be loaded. This is to avoid the overhead of creating
+ an exception.
+ 
+ @param name the class name
+ @return the class object, or null if it could not be found.]]>
+      </doc>
+    </method>
+    <method name="getClasses" return="java.lang.Class[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.Class[]"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property
+ as an array of <code>Class</code>.
+ The value of the property specifies a list of comma separated class names.  
+ If no such property is specified, then <code>defaultValue</code> is 
+ returned.
+ 
+ @param name the property name.
+ @param defaultValue default value.
+ @return property value as a <code>Class[]</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>.  
+ If no such property is specified, then <code>defaultValue</code> is 
+ returned.
+ 
+ @param name the conf key name.
+ @param defaultValue default value.
+ @return property value as a <code>Class</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultValue" type="java.lang.Class"/>
+      <param name="xface" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>
+ implementing the interface specified by <code>xface</code>.
+   
+ If no such property is specified, then <code>defaultValue</code> is 
+ returned.
+ 
+ An exception is thrown if the returned class does not implement the named
+ interface. 
+ 
+ @param name the conf key name.
+ @param defaultValue default value.
+ @param xface the interface implemented by the named class.
+ @return property value as a <code>Class</code>, 
+         or <code>defaultValue</code>.]]>
+      </doc>
+    </method>
+    <method name="getInstances" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="xface" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the value of the <code>name</code> property as a <code>List</code>
+ of objects implementing the interface specified by <code>xface</code>.
+ 
+ An exception is thrown if any of the classes does not exist, or if it does
+ not implement the named interface.
+ 
+ @param name the property name.
+ @param xface the interface implemented by the classes named by
+        <code>name</code>.
+ @return a <code>List</code> of objects implementing <code>xface</code>.]]>
+      </doc>
+    </method>
+    <method name="setClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="theClass" type="java.lang.Class"/>
+      <param name="xface" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the value of the <code>name</code> property to the name of a 
+ <code>theClass</code> implementing the given interface <code>xface</code>.
+ 
+ An exception is thrown if <code>theClass</code> does not implement the 
+ interface <code>xface</code>. 
+ 
+ @param name property name.
+ @param theClass property value.
+ @param xface the interface implemented by the named class.]]>
+      </doc>
+    </method>
+    <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dirsProp" type="java.lang.String"/>
+      <param name="path" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a local file under a directory named by <i>dirsProp</i> with
+ the given <i>path</i>.  If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code.  If the selected
+ directory does not exist, an attempt is made to create it.
+ 
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+      </doc>
+    </method>
+    <method name="getFile" return="java.io.File"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dirsProp" type="java.lang.String"/>
+      <param name="path" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a local file name under a directory named in <i>dirsProp</i> with
+ the given <i>path</i>.  If <i>dirsProp</i> contains multiple directories,
+ then one is chosen based on <i>path</i>'s hash code.  If the selected
+ directory does not exist, an attempt is made to create it.
+ 
+ @param dirsProp directory in which to locate the file.
+ @param path file-path.
+ @return local file under the directory with the given path.]]>
+      </doc>
+    </method>
+    <method name="getResource" return="java.net.URL"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the {@link URL} for the named resource.
+ 
+ @param name resource name.
+ @return the url for the named resource.]]>
+      </doc>
+    </method>
+    <method name="getConfResourceAsInputStream" return="java.io.InputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get an input stream attached to the configuration resource with the
+ given <code>name</code>.
+ 
+ @param name configuration resource name.
+ @return an input stream attached to the resource.]]>
+      </doc>
+    </method>
+    <method name="getConfResourceAsReader" return="java.io.Reader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get a {@link Reader} attached to the configuration resource with the
+ given <code>name</code>.
+ 
+ @param name configuration resource name.
+ @return a reader attached to the resource.]]>
+      </doc>
+    </method>
+    <method name="getFinalParameters" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the set of parameters marked final.
+
+ @return final parameter set.]]>
+      </doc>
+    </method>
+    <method name="getProps" return="java.util.Properties"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the number of keys in the configuration.
+
+ @return number of keys in the configuration.]]>
+      </doc>
+    </method>
+    <method name="clear"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Clears all keys from the configuration.]]>
+      </doc>
+    </method>
+    <method name="iterator" return="java.util.Iterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get an {@link Iterator} to go through the list of <code>String</code> 
+ key-value pairs in the configuration.
+ 
+ @return an iterator over the entries.]]>
+      </doc>
+    </method>
+    <method name="getPropsWithPrefix" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="confPrefix" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Constructs a mapping of configuration and includes all properties that
+ start with the specified configuration prefix.  Property names in the
+ mapping are trimmed to remove the configuration prefix.
+
+ @param confPrefix configuration prefix
+ @return mapping of configuration properties with prefix stripped]]>
+      </doc>
+    </method>
+    <method name="addTags"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="prop" type="java.util.Properties"/>
+      <doc>
+      <![CDATA[Add tags defined in HADOOP_TAGS_SYSTEM, HADOOP_TAGS_CUSTOM.
+ @param prop the properties to process for tag definitions.]]>
+      </doc>
+    </method>
+    <method name="writeXml"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write out the non-default properties in this configuration to the given
+ {@link OutputStream} using UTF-8 encoding.
+ 
+ @param out the output stream to write to.]]>
+      </doc>
+    </method>
+    <method name="writeXml"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeXml"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="propertyName" type="java.lang.String"/>
+      <param name="out" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <doc>
+      <![CDATA[Write out the non-default properties in this configuration to the
+ given {@link Writer}.
+ <ul>
+ <li>
+ When property name is not empty and the property exists in the
+ configuration, this method writes the property and its attributes
+ to the {@link Writer}.
+ </li>
+
+ <li>
+ When property name is null or empty, this method writes all the
+ configuration properties and their attributes to the {@link Writer}.
+ </li>
+
+ <li>
+ When property name is not empty but the property doesn't exist in
+ the configuration, this method throws an {@link IllegalArgumentException}.
+ </li>
+ </ul>
+ @param propertyName the property name to write; when null or empty, all
+   properties are written.
+ @param out the writer to write to.]]>
+      </doc>
+    </method>
+    <method name="dumpConfiguration"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="propertyName" type="java.lang.String"/>
+      <param name="out" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes properties and their attributes (final and resource)
+  to the given {@link Writer}.
+  <ul>
+  <li>
+  When propertyName is not empty, and the property exists
+  in the configuration, the format of the output would be,
+  <pre>
+  {
+    "property": {
+      "key" : "key1",
+      "value" : "value1",
+      "isFinal" : "key1.isFinal",
+      "resource" : "key1.resource"
+    }
+  }
+  </pre>
+  </li>
+
+  <li>
+  When propertyName is null or empty, it behaves same as
+  {@link #dumpConfiguration(Configuration, Writer)}, the
+  output would be,
+  <pre>
+  { "properties" :
+      [ { key : "key1",
+          value : "value1",
+          isFinal : "key1.isFinal",
+          resource : "key1.resource" },
+        { key : "key2",
+          value : "value2",
+          isFinal : "key2.isFinal",
+          resource : "key2.resource" }
+       ]
+   }
+  </pre>
+  </li>
+
+  <li>
+  When propertyName is not empty, and the property is not
+  found in the configuration, this method will throw an
+  {@link IllegalArgumentException}.
+  </li>
+  </ul>
+  <p>
+ @param config the configuration
+ @param propertyName property name
+ @param out the Writer to write to
+ @throws IOException
+ @throws IllegalArgumentException when property name is not
+   empty and the property is not found in configuration]]>
+      </doc>
+    </method>
+    <method name="dumpConfiguration"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="out" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes out all properties and their attributes (final and resource) to
+  the given {@link Writer}, the format of the output would be,
+
+  <pre>
+  { "properties" :
+      [ { key : "key1",
+          value : "value1",
+          isFinal : "key1.isFinal",
+          resource : "key1.resource" },
+        { key : "key2",
+          value : "value2",
+          isFinal : "key2.isFinal",
+          resource : "key2.resource" }
+       ]
+   }
+  </pre>
+
+  It does not output the properties of the configuration object which
+  is loaded from an input stream.
+  <p>
+
+ @param config the configuration
+ @param out the Writer to write to
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClassLoader" return="java.lang.ClassLoader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link ClassLoader} for this job.
+
+ @return the correct class loader.]]>
+      </doc>
+    </method>
+    <method name="setClassLoader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="classLoader" type="java.lang.ClassLoader"/>
+      <doc>
+      <![CDATA[Set the class loader that will be used to load the various objects.
+ 
+ @param classLoader the new class loader.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setQuietMode"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="quietmode" type="boolean"/>
+      <doc>
+      <![CDATA[Set the quietness-mode. 
+ 
+ In the quiet-mode, error and informational messages might not be logged.
+ 
+ @param quietmode <code>true</code> to set quiet-mode on, <code>false</code>
+              to turn it off.]]>
+      </doc>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[For debugging.  List non-default properties to the terminal and exit.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getValByRegex" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="regex" type="java.lang.String"/>
+      <doc>
+      <![CDATA[get keys matching the the regex 
+ @param regex
+ @return {@literal Map<String,String>} with matching keys]]>
+      </doc>
+    </method>
+    <method name="dumpDeprecatedKeys"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hasWarnedDeprecation" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns whether a deprecation warning has already been logged for the
+ given name. Always returns false if the name is not deprecated.]]>
+      </doc>
+    </method>
+    <method name="getAllPropertiesByTag" return="java.util.Properties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tag" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get all properties belonging to tag.
+ @param tag tag
+ @return Properties with matching tag]]>
+      </doc>
+    </method>
+    <method name="getAllPropertiesByTags" return="java.util.Properties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tagList" type="java.util.List"/>
+      <doc>
+      <![CDATA[Get all properties belonging to list of input tags. Calls
+ getAllPropertiesByTag internally.
+ @param tagList list of input tags
+ @return Properties with matching tags]]>
+      </doc>
+    </method>
+    <method name="isPropertyTag" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tagStr" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get Property tag Enum corresponding to given source.
+
+ @param tagStr String representation of Enum
+ @return true if tagStr is a valid tag]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Provides access to configuration parameters.
+
+ <h3 id="Resources">Resources</h3>
+
+ <p>Configurations are specified by resources. A resource contains a set of
+ name/value pairs as XML data. Each resource is named by either a 
+ <code>String</code> or by a {@link Path}. If named by a <code>String</code>, 
+ then the classpath is examined for a file with that name.  If named by a 
+ <code>Path</code>, then the local filesystem is examined directly, without 
+ referring to the classpath.
+
+ <p>Unless explicitly turned off, Hadoop by default specifies two 
+ resources, loaded in-order from the classpath: <ol>
+ <li><tt>
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a></tt>: Read-only defaults for hadoop.</li>
+ <li><tt>core-site.xml</tt>: Site-specific configuration for a given hadoop
+ installation.</li>
+ </ol>
+ Applications may add additional resources, which are loaded
+ subsequent to these resources in the order they are added.
+ 
+ <h4 id="FinalParams">Final Parameters</h4>
+
+ <p>Configuration parameters may be declared <i>final</i>. 
+ Once a resource declares a value final, no subsequently-loaded 
+ resource can alter that value.  
+ For example, one might define a final parameter with:
+ <pre><code>
+  &lt;property&gt;
+    &lt;name&gt;dfs.hosts.include&lt;/name&gt;
+    &lt;value&gt;/etc/hadoop/conf/hosts.include&lt;/value&gt;
+    <b>&lt;final&gt;true&lt;/final&gt;</b>
+  &lt;/property&gt;</code></pre>
+
+ Administrators typically define parameters as final in 
+ <tt>core-site.xml</tt> for values that user applications may not alter.
+
+ <h4 id="VariableExpansion">Variable Expansion</h4>
+
+ <p>Value strings are first processed for <i>variable expansion</i>. The
+ available properties are:<ol>
+ <li>Other properties defined in this Configuration; and, if a name is
+ undefined here,</li>
+ <li>Environment variables in {@link System#getenv()} if a name starts with
+ "env.", or</li>
+ <li>Properties in {@link System#getProperties()}.</li>
+ </ol>
+
+ <p>For example, if a configuration resource contains the following property
+ definitions: 
+ <pre><code>
+  &lt;property&gt;
+    &lt;name&gt;basedir&lt;/name&gt;
+    &lt;value&gt;/user/${<i>user.name</i>}&lt;/value&gt;
+  &lt;/property&gt;
+  
+  &lt;property&gt;
+    &lt;name&gt;tempdir&lt;/name&gt;
+    &lt;value&gt;${<i>basedir</i>}/tmp&lt;/value&gt;
+  &lt;/property&gt;
+
+  &lt;property&gt;
+    &lt;name&gt;otherdir&lt;/name&gt;
+    &lt;value&gt;${<i>env.BASE_DIR</i>}/other&lt;/value&gt;
+  &lt;/property&gt;
+  </code></pre>
+
+ <p>When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt>
+ will be resolved to another property in this Configuration, while
+ <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value
+ of the System property with that name.
+ <p>When <tt>conf.get("otherdir")</tt> is called, then <tt>${<i>env.BASE_DIR</i>}</tt>
+ will be resolved to the value of the <tt>${<i>BASE_DIR</i>}</tt> environment variable.
+ It supports <tt>${<i>env.NAME:-default</i>}</tt> and <tt>${<i>env.NAME-default</i>}</tt> notations.
+ The former is resolved to "default" if <tt>${<i>NAME</i>}</tt> environment variable is undefined
+ or its value is empty.
+ The latter behaves the same way only if <tt>${<i>NAME</i>}</tt> is undefined.
+ <p>By default, warnings will be given to any deprecated configuration 
+ parameters and these are suppressible by configuring
+ <tt>log4j.logger.org.apache.hadoop.conf.Configuration.deprecation</tt> in
+ log4j.properties file.
+
+ <h4 id="Tags">Tags</h4>
+
+ <p>Optionally we can tag related properties together by using tag
+ attributes. System tags are defined by the hadoop.tags.system property. Users
+ can define their own custom tags in the hadoop.tags.custom property.
+
+ <p>For example, we can tag existing property as:
+ <pre><code>
+  &lt;property&gt;
+    &lt;name&gt;dfs.replication&lt;/name&gt;
+    &lt;value&gt;3&lt;/value&gt;
+    &lt;tag&gt;HDFS,REQUIRED&lt;/tag&gt;
+  &lt;/property&gt;
+
+  &lt;property&gt;
+    &lt;name&gt;dfs.data.transfer.protection&lt;/name&gt;
+    &lt;value&gt;3&lt;/value&gt;
+    &lt;tag&gt;HDFS,SECURITY&lt;/tag&gt;
+  &lt;/property&gt;
+ </code></pre>
+ <p> Properties marked with tags can be retrieved with <tt>conf
+ .getAllPropertiesByTag("HDFS")</tt> or <tt>conf.getAllPropertiesByTags
+ (Arrays.asList("YARN","SECURITY"))</tt>.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.conf.Configuration -->
+  <!-- start class org.apache.hadoop.conf.Configured -->
+  <class name="Configured" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="Configured"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a Configured.]]>
+      </doc>
+    </constructor>
+    <constructor name="Configured" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a Configured.]]>
+      </doc>
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Base class for things that may be configured with a {@link Configuration}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.conf.Configured -->
+  <doc>
+  <![CDATA[Configuration of system parameters.]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.crypto">
+</package>
+<package name="org.apache.hadoop.crypto.key">
+  <!-- start class org.apache.hadoop.crypto.key.KeyProvider -->
+  <class name="KeyProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Closeable"/>
+    <constructor name="KeyProvider" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor.
+ 
+ @param conf configuration for the provider]]>
+      </doc>
+    </constructor>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the provider configuration.
+ 
+ @return the provider configuration]]>
+      </doc>
+    </method>
+    <method name="options" return="org.apache.hadoop.crypto.key.KeyProvider.Options"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[A helper function to create an options object.
+ @param conf the configuration to use
+ @return a new options object]]>
+      </doc>
+    </method>
+    <method name="isTransient" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Indicates whether this provider represents a store
+ that is intended for transient use - such as the UserProvider
+ is. These providers are generally used to provide access to
+ keying material rather than for long term storage.
+ @return true if transient, false otherwise]]>
+      </doc>
+    </method>
+    <method name="getKeyVersion" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="versionName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the key material for a specific version of the key. This method is used
+ when decrypting data.
+ @param versionName the name of a specific version of the key
+ @return the key material
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getKeys" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the key names for all keys.
+ @return the list of key names
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getKeysMetadata" return="org.apache.hadoop.crypto.key.KeyProvider.Metadata[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="names" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get key metadata in bulk.
+ @param names the names of the keys to get
+ @return the metadata for each of the requested keys
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getKeyVersions" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the key material for all versions of a specific key name.
+ @param name the base name of the key
+ @return the list of key material
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getCurrentKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the current version of the key, which should be used for encrypting new
+ data.
+ @param name the base name of the key
+ @return the version name of the current version of the key or null if the
+    key version doesn't exist
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getMetadata" return="org.apache.hadoop.crypto.key.KeyProvider.Metadata"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get metadata about the key.
+ @param name the basename of the key
+ @return the key's metadata or null if the key doesn't exist
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="material" type="byte[]"/>
+      <param name="options" type="org.apache.hadoop.crypto.key.KeyProvider.Options"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a new key. The given key must not already exist.
+ @param name the base name of the key
+ @param material the key material for the first version of the key.
+ @param options the options for the new key.
+ @return the version name of the first version of the key.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="generateKey" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="size" type="int"/>
+      <param name="algorithm" type="java.lang.String"/>
+      <exception name="NoSuchAlgorithmException" type="java.security.NoSuchAlgorithmException"/>
+      <doc>
+      <![CDATA[Generates a key material.
+
+ @param size length of the key.
+ @param algorithm algorithm to use for generating the key.
+ @return the generated key.
+ @throws NoSuchAlgorithmException]]>
+      </doc>
+    </method>
+    <method name="createKey" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="options" type="org.apache.hadoop.crypto.key.KeyProvider.Options"/>
+      <exception name="NoSuchAlgorithmException" type="java.security.NoSuchAlgorithmException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a new key generating the material for it.
+ The given key must not already exist.
+ <p>
+ This implementation generates the key material and calls the
+ {@link #createKey(String, byte[], Options)} method.
+
+ @param name the base name of the key
+ @param options the options for the new key.
+ @return the version name of the first version of the key.
+ @throws IOException
+ @throws NoSuchAlgorithmException]]>
+      </doc>
+    </method>
+    <method name="deleteKey"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete the given key.
+ @param name the name of the key to delete
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="rollNewVersion" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="material" type="byte[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Roll a new version of the given key.
+ @param name the basename of the key
+ @param material the new key material
+ @return the name of the new version of the key
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Can be used by implementing classes to close any resources
+ that require closing]]>
+      </doc>
+    </method>
+    <method name="rollNewVersion" return="org.apache.hadoop.crypto.key.KeyProvider.KeyVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="NoSuchAlgorithmException" type="java.security.NoSuchAlgorithmException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Roll a new version of the given key generating the material for it.
+ <p>
+ This implementation generates the key material and calls the
+ {@link #rollNewVersion(String, byte[])} method.
+
+ @param name the basename of the key
+ @return the name of the new version of the key
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="invalidateCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Can be used by implementing classes to invalidate the caches. This could be
+ used after rollNewVersion to provide a strong guarantee to return the new
+ version of the given key.
+
+ @param name the basename of the key
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="flush"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Ensures that any changes to the keys are written to persistent store.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getBaseName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="versionName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Split the versionName into a base name. Converts "/aaa/bbb@3" to
+ "/aaa/bbb".
+ @param versionName the version name to split
+ @return the base name of the key
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="buildVersionName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="version" type="int"/>
+      <doc>
+      <![CDATA[Build a version string from a basename and version number. Converts
+ "/aaa/bbb" and 3 to "/aaa/bbb@3".
+ @param name the basename of the key
+ @param version the version of the key
+ @return the versionName of the key.]]>
+      </doc>
+    </method>
+    <method name="findProvider" return="org.apache.hadoop.crypto.key.KeyProvider"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="providerList" type="java.util.List"/>
+      <param name="keyName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Find the provider with the given key.
+ @param providerList the list of providers
+ @param keyName the key name we are looking for
+ @return the KeyProvider that has the key]]>
+      </doc>
+    </method>
+    <method name="needsPassword" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Does this provider require a password? This means that a password is
+ required for normal operation, and it has not been found through normal
+ means. If true, the password should be provided by the caller using
+ setPassword().
+ @return Whether or not the provider requires a password
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="noPasswordWarning" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If a password for the provider is needed, but is not provided, this will
+ return a warning and instructions for supplying said password to the
+ provider.
+ @return A warning and instructions for supplying the password]]>
+      </doc>
+    </method>
+    <method name="noPasswordError" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If a password for the provider is needed, but is not provided, this will
+ return an error message and instructions for supplying said password to
+ the provider.
+ @return An error message and instructions for supplying the password]]>
+      </doc>
+    </method>
+    <field name="DEFAULT_CIPHER_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_CIPHER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_BITLENGTH_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_BITLENGTH" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="JCEKS_KEY_SERIALFILTER_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="JCEKS_KEY_SERIAL_FILTER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A provider of secret key material for Hadoop applications. Provides an
+ abstraction to separate key storage from users of encryption. It
+ is intended to support getting or storing keys in a variety of ways,
+ including third party bindings.
+ <p>
+ <code>KeyProvider</code> implementations must be thread safe.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.crypto.key.KeyProvider -->
+  <!-- start class org.apache.hadoop.crypto.key.KeyProviderFactory -->
+  <class name="KeyProviderFactory" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="KeyProviderFactory"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createProvider" return="org.apache.hadoop.crypto.key.KeyProvider"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="providerName" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getProviders" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="get" return="org.apache.hadoop.crypto.key.KeyProvider"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a KeyProvider based on a provided URI.
+
+ @param uri key provider URI
+ @param conf configuration to initialize the key provider
+ @return the key provider for the specified URI, or <code>NULL</code> if
+         a provider for the specified URI scheme could not be found.
+ @throws IOException thrown if the provider failed to initialize.]]>
+      </doc>
+    </method>
+    <field name="KEY_PROVIDER_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A factory to create a list of KeyProvider based on the path given in a
+ Configuration. It uses a service loader interface to find the available
+ KeyProviders and create them based on the list of URIs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.crypto.key.KeyProviderFactory -->
+</package>
+<package name="org.apache.hadoop.crypto.key.kms">
+</package>
+<package name="org.apache.hadoop.crypto.random">
+</package>
+<package name="org.apache.hadoop.fs">
+  <!-- start interface org.apache.hadoop.fs.Abortable -->
+  <interface name="Abortable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="abort" return="org.apache.hadoop.fs.Abortable.AbortableResult"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Abort the active operation without the output becoming visible.
+
+ This is to provide ability to cancel the write on stream; once
+ a stream is aborted, the write MUST NOT become visible.
+
+ @throws UnsupportedOperationException if the operation is not supported.
+ @return the result.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Abort data being written to a stream, so that close() does
+  not write the data. It is implemented by output streams in
+  some object stores, and passed through {@link FSDataOutputStream}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.Abortable -->
+  <!-- start class org.apache.hadoop.fs.AbstractFileSystem -->
+  <class name="AbstractFileSystem" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.fs.PathCapabilities"/>
+    <constructor name="AbstractFileSystem" type="java.net.URI, java.lang.String, boolean, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="URISyntaxException" type="java.net.URISyntaxException"/>
+      <doc>
+      <![CDATA[Constructor to be called by subclasses.
+ 
+ @param uri for this file system.
+ @param supportedScheme the scheme supported by the implementor
+ @param authorityNeeded if true then the URI must have authority, if false
+          then the URI must have null authority.
+
+ @throws URISyntaxException <code>uri</code> has syntax error]]>
+      </doc>
+    </constructor>
+    <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isValidName" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns true if the specified string is considered valid in the path part
+ of a URI by this file system.  The default implementation enforces the rules
+ of HDFS, but subclasses may override this method to implement specific
+ validation rules for specific file systems.
+ 
+ @param src String source filename to check, path part of the URI
+ @return boolean true if the specified string is considered valid]]>
+      </doc>
+    </method>
+    <method name="createFileSystem" return="org.apache.hadoop.fs.AbstractFileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <doc>
+      <![CDATA[Create a file system instance for the specified uri using the conf. The
+ conf is used to find the class name that implements the file system. The
+ conf is also passed to the file system for its configuration.
+
+ @param uri URI of the file system
+ @param conf Configuration for the file system
+ 
+ @return Returns the file system for the given URI
+
+ @throws UnsupportedFileSystemException file system for <code>uri</code> is
+           not found]]>
+      </doc>
+    </method>
+    <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <doc>
+      <![CDATA[Get the statistics for a particular file system.
+ 
+ @param uri
+          used as key to lookup STATISTICS_TABLE. Only scheme and authority
+          part of the uri are used.
+ @return a statistics object]]>
+      </doc>
+    </method>
+    <method name="clearStatistics"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="printStatistics"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Prints statistics for all file systems.]]>
+      </doc>
+    </method>
+    <method name="getAllStatistics" return="java.util.Map"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="get" return="org.apache.hadoop.fs.AbstractFileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <doc>
+      <![CDATA[The main factory method for creating a file system. Get a file system for
+ the URI's scheme and authority. The scheme of the <code>uri</code>
+ determines a configuration property name,
+ <tt>fs.AbstractFileSystem.<i>scheme</i>.impl</tt> whose value names the
+ AbstractFileSystem class.
+ 
+ The entire URI and conf is passed to the AbstractFileSystem factory method.
+ 
+ @param uri for the file system to be created.
+ @param conf which is passed to the file system impl.
+ 
+ @return file system for the given URI.
+ 
+ @throws UnsupportedFileSystemException if the file system for
+           <code>uri</code> is not supported.]]>
+      </doc>
+    </method>
+    <method name="checkScheme"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="supportedScheme" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Check that the Uri's scheme matches
+ @param uri
+ @param supportedScheme]]>
+      </doc>
+    </method>
+    <method name="getUriDefaultPort" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default port of this file system.
+ 
+ @return default port of this file system's Uri scheme
+         A uri with a port of -1 =&gt; default port;]]>
+      </doc>
+    </method>
+    <method name="getUri" return="java.net.URI"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.
+ 
+ @return the uri of this file system.]]>
+      </doc>
+    </method>
+    <method name="checkPath"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Check that a Path belongs to this FileSystem.
+ 
+ If the path is a fully qualified URI, then its scheme and authority
+ match that of this file system. Otherwise the path must be a 
+ slash-relative name.
+ 
+ @throws InvalidPathException if the path is invalid]]>
+      </doc>
+    </method>
+    <method name="getUriPath" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Get the path-part of a pathname. Checks that URI matches this file system
+ and that the path-part is a valid name.
+ 
+ @param p path
+ 
+ @return path-part of the Path p]]>
+      </doc>
+    </method>
+    <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Make the path fully qualified to this file system
+ @param path
+ @return the qualified path]]>
+      </doc>
+    </method>
+    <method name="getInitialWorkingDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Some file systems like LocalFileSystem have an initial workingDir
+ that is used as the starting workingDir. For other file systems
+ like HDFS there is no built in notion of an initial workingDir.
+ 
+ @return the initial workingDir if the file system has such a notion
+         otherwise return a null.]]>
+      </doc>
+    </method>
+    <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the current user's home directory in this file system.
+ The default implementation returns "/user/$USER/".
+ 
+ @return current user's home directory.]]>
+      </doc>
+    </method>
+    <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #getServerDefaults(Path)} instead">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a set of server default configuration values.
+ 
+ @return server default configuration values
+ 
+ @throws IOException an I/O error occurred
+ @deprecated use {@link #getServerDefaults(Path)} instead]]>
+      </doc>
+    </method>
+    <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a set of server default configuration values based on path.
+ @param f path to fetch server defaults
+ @return server default configuration values for path
+ @throws IOException an I/O error occurred]]>
+      </doc>
+    </method>
+    <method name="resolvePath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the fully-qualified path of path f resolving the path
+ through any internal symlinks or mount point
+ @param p path to be resolved
+ @return fully qualified path 
+ @throws FileNotFoundException
+ @throws AccessControlException
+ @throws IOException
+ @throws UnresolvedLinkException if symbolic link on path cannot be
+ resolved internally]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="createFlag" type="java.util.EnumSet"/>
+      <param name="opts" type="org.apache.hadoop.fs.Options.CreateOpts[]"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#create(Path, EnumSet, Options.CreateOpts...)} except
+ that the Path f must be fully qualified and the permission is absolute
+ (i.e. umask has been applied).]]>
+      </doc>
+    </method>
+    <method name="createInternal" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="flag" type="java.util.EnumSet"/>
+      <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <param name="checksumOpt" type="org.apache.hadoop.fs.Options.ChecksumOpt"/>
+      <param name="createParent" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link #create(Path, EnumSet, Options.CreateOpts...)} except that the opts
+ have been declared explicitly.]]>
+      </doc>
+    </method>
+    <method name="mkdir"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="createParent" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#mkdir(Path, FsPermission, boolean)} except that the Path
+ f must be fully qualified and the permission is absolute (i.e. 
+ umask has been applied).]]>
+      </doc>
+    </method>
+    <method name="delete" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="recursive" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#delete(Path, boolean)} except that Path f must be for
+ this file system.]]>
+      </doc>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#open(Path)} except that Path f must be for this
+ file system.]]>
+      </doc>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#open(Path, int)} except that Path f must be for this
+ file system.]]>
+      </doc>
+    </method>
+    <method name="truncate" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="newLength" type="long"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#truncate(Path, long)} except that Path f must be for
+ this file system.]]>
+      </doc>
+    </method>
+    <method name="setReplication" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="replication" type="short"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#setReplication(Path, short)} except that Path f must be
+ for this file system.]]>
+      </doc>
+    </method>
+    <method name="rename"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
+ f must be for this file system.]]>
+      </doc>
+    </method>
+    <method name="renameInternal"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
+ f must be for this file system and NO OVERWRITE is performed.
+ 
+ File systems that do not have a built in overwrite need implement only this
+ method and can take advantage of the default impl of the other
+ {@link #renameInternal(Path, Path, boolean)}]]>
+      </doc>
+    </method>
+    <method name="renameInternal"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="overwrite" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#rename(Path, Path, Options.Rename...)} except that Path
+ f must be for this file system.]]>
+      </doc>
+    </method>
+    <method name="supportsSymlinks" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns true if the file system supports symlinks, false otherwise.
+ @return true if filesystem supports symlinks]]>
+      </doc>
+    </method>
+    <method name="createSymlink"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="target" type="org.apache.hadoop.fs.Path"/>
+      <param name="link" type="org.apache.hadoop.fs.Path"/>
+      <param name="createParent" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of  
+ {@link FileContext#createSymlink(Path, Path, boolean)};]]>
+      </doc>
+    </method>
+    <method name="getLinkTarget" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Partially resolves the path. This is used during symlink resolution in
+ {@link FSLinkResolver}, and differs from the similarly named method
+ {@link FileContext#getLinkTarget(Path)}.
+ @throws IOException subclass implementations may throw IOException]]>
+      </doc>
+    </method>
+    <method name="setPermission"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#setPermission(Path, FsPermission)} except that Path f
+ must be for this file system.]]>
+      </doc>
+    </method>
+    <method name="setOwner"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="username" type="java.lang.String"/>
+      <param name="groupname" type="java.lang.String"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#setOwner(Path, String, String)} except that Path f must
+ be for this file system.]]>
+      </doc>
+    </method>
+    <method name="setTimes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="mtime" type="long"/>
+      <param name="atime" type="long"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#setTimes(Path, long, long)} except that Path f must be
+ for this file system.]]>
+      </doc>
+    </method>
+    <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#getFileChecksum(Path)} except that Path f must be for
+ this file system.]]>
+      </doc>
+    </method>
+    <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#getFileStatus(Path)} 
+ except that an UnresolvedLinkException may be thrown if a symlink is 
+ encountered in the path.]]>
+      </doc>
+    </method>
+    <method name="msync"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <doc>
+      <![CDATA[Synchronize client metadata state.
+ <p>
+ In some FileSystem implementations such as HDFS metadata
+ synchronization is essential to guarantee consistency of read requests
+ particularly in HA setting.
+ @throws IOException
+ @throws UnsupportedOperationException]]>
+      </doc>
+    </method>
+    <method name="getFileLinkStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#getFileLinkStatus(Path)}
+ except that an UnresolvedLinkException may be thrown if a symlink is  
+ encountered in the path leading up to the final path component.
+ If the file system does not support symlinks then the behavior is
+ equivalent to {@link AbstractFileSystem#getFileStatus(Path)}.]]>
+      </doc>
+    </method>
+    <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="start" type="long"/>
+      <param name="len" type="long"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#getFileBlockLocations(Path, long, long)} except that
+ Path f must be for this file system.]]>
+      </doc>
+    </method>
+    <method name="getFsStatus" return="org.apache.hadoop.fs.FsStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#getFsStatus(Path)} except that Path f must be for this
+ file system.]]>
+      </doc>
+    </method>
+    <method name="getFsStatus" return="org.apache.hadoop.fs.FsStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#getFsStatus(Path)}.]]>
+      </doc>
+    </method>
+    <method name="listStatusIterator" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#listStatus(Path)} except that Path f must be for this
+ file system.]]>
+      </doc>
+    </method>
+    <method name="listLocatedStatus" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#listLocatedStatus(Path)} except that Path f 
+ must be for this file system.
+
+ In HDFS implementation, the BlockLocation of returned LocatedFileStatus
+ will have different formats for replicated and erasure coded file. Please
+ refer to {@link FileSystem#getFileBlockLocations(FileStatus, long, long)}
+ for more details.]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext.Util#listStatus(Path)} except that Path f must be 
+ for this file system.]]>
+      </doc>
+    </method>
+    <method name="listCorruptFileBlocks" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@return an iterator over the corrupt files under the given path
+ (may contain duplicates if a file has more than one corrupt block)
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setVerifyChecksum"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="verifyChecksum" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#setVerifyChecksum(boolean, Path)} except that Path f
+ must be for this file system.]]>
+      </doc>
+    </method>
+    <method name="getCanonicalServiceName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a canonical name for this file system.
+ @return a URI string that uniquely identifies this file system]]>
+      </doc>
+    </method>
+    <method name="modifyAclEntries"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Modifies ACL entries of files and directories.  This method can add new ACL
+ entries or modify the permissions on existing ACL entries.  All existing
+ ACL entries that are not specified in this call are retained without
+ changes.  (Modifications are merged into the current ACL.)
+
+ @param path Path to modify
+ @param aclSpec List{@literal <AclEntry>} describing modifications
+ @throws IOException if an ACL could not be modified]]>
+      </doc>
+    </method>
+    <method name="removeAclEntries"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Removes ACL entries from files and directories.  Other ACL entries are
+ retained.
+
+ @param path Path to modify
+ @param aclSpec List{@literal <AclEntry>} describing entries to remove
+ @throws IOException if an ACL could not be modified]]>
+      </doc>
+    </method>
+    <method name="removeDefaultAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Removes all default ACL entries from files and directories.
+
+ @param path Path to modify
+ @throws IOException if an ACL could not be modified]]>
+      </doc>
+    </method>
+    <method name="removeAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Removes all but the base ACL entries of files and directories.  The entries
+ for user, group, and others are retained for compatibility with permission
+ bits.
+
+ @param path Path to modify
+ @throws IOException if an ACL could not be removed]]>
+      </doc>
+    </method>
+    <method name="setAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Fully replaces ACL of files and directories, discarding all existing
+ entries.
+
+ @param path Path to modify
+ @param aclSpec List{@literal <AclEntry>} describing modifications, must
+ include entries for user, group, and others for compatibility with
+ permission bits.
+ @throws IOException if an ACL could not be modified]]>
+      </doc>
+    </method>
+    <method name="getAclStatus" return="org.apache.hadoop.fs.permission.AclStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets the ACLs of files and directories.
+
+ @param path Path to get
+ @return RemoteIterator{@literal <AclStatus>} which returns each AclStatus
+ @throws IOException if an ACL could not be read]]>
+      </doc>
+    </method>
+    <method name="setXAttr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="byte[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set an xattr of a file or directory.
+ The name must be prefixed with the namespace followed by ".". For example,
+ "user.attr".
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to modify
+ @param name xattr name.
+ @param value xattr value.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setXAttr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="byte[]"/>
+      <param name="flag" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set an xattr of a file or directory.
+ The name must be prefixed with the namespace followed by ".". For example,
+ "user.attr".
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to modify
+ @param name xattr name.
+ @param value xattr value.
+ @param flag xattr set flag
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getXAttr" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get an xattr for a file or directory.
+ The name must be prefixed with the namespace followed by ".". For example,
+ "user.attr".
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to get extended attribute
+ @param name xattr name.
+ @return byte[] xattr value.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getXAttrs" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get all of the xattrs for a file or directory.
+ Only those xattrs for which the logged-in user has permissions to view
+ are returned.
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to get extended attributes
+
+ @return {@literal Map<String, byte[]>} describing the XAttrs of the file
+ or directory
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getXAttrs" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="names" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get all of the xattrs for a file or directory.
+ Only those xattrs for which the logged-in user has permissions to view
+ are returned.
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to get extended attributes
+ @param names XAttr names.
+ @return {@literal Map<String, byte[]>} describing the XAttrs of the file
+ or directory
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="listXAttrs" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get all of the xattr names for a file or directory.
+ Only the xattr names for which the logged-in user has permissions to view
+ are returned.
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to get extended attributes
+ @return {@literal Map<String, byte[]>} describing the XAttrs of the file
+ or directory
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="removeXAttr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Remove an xattr of a file or directory.
+ The name must be prefixed with the namespace followed by ".". For example,
+ "user.attr".
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to remove extended attribute
+ @param name xattr name
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createSnapshot" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="snapshotName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#createSnapshot(Path, String)}.]]>
+      </doc>
+    </method>
+    <method name="renameSnapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="snapshotOldName" type="java.lang.String"/>
+      <param name="snapshotNewName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#renameSnapshot(Path, String, String)}.]]>
+      </doc>
+    </method>
+    <method name="deleteSnapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="snapshotDir" type="org.apache.hadoop.fs.Path"/>
+      <param name="snapshotName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The specification of this method matches that of
+ {@link FileContext#deleteSnapshot(Path, String)}.]]>
+      </doc>
+    </method>
+    <method name="satisfyStoragePolicy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the source path to satisfy storage policy.
+ @param path The source path referring to either a directory or a file.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setStoragePolicy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="policyName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the storage policy for a given file or directory.
+
+ @param path file or directory path.
+ @param policyName the name of the target storage policy. The list
+                   of supported Storage policies can be retrieved
+                   via {@link #getAllStoragePolicies}.]]>
+      </doc>
+    </method>
+    <method name="unsetStoragePolicy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unset the storage policy set for a given file or directory.
+ @param src file or directory path.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getStoragePolicy" return="org.apache.hadoop.fs.BlockStoragePolicySpi"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Retrieve the storage policy for a given file or directory.
+
+ @param src file or directory path.
+ @return storage policy for give file.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAllStoragePolicies" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Retrieve all the storage policies supported by this file system.
+
+ @return all storage policies supported by this filesystem.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="openFileWithOptions" return="java.util.concurrent.CompletableFuture"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="parameters" type="org.apache.hadoop.fs.impl.OpenFileParameters"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Open a file with the given set of options.
+ The base implementation performs a blocking
+ call to {@link #open(Path, int)} in this call;
+ the actual outcome is in the returned {@code CompletableFuture}.
+ This avoids having to create some thread pool, while still
+ setting up the expectation that the {@code get()} call
+ is needed to evaluate the result.
+ @param path path to the file
+ @param parameters open file parameters from the builder.
+ @return a future which will evaluate to the opened file.
+ @throws IOException failure to resolve the link.
+ @throws IllegalArgumentException unknown mandatory key]]>
+      </doc>
+    </method>
+    <method name="hasPathCapability" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="capability" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createMultipartUploader" return="org.apache.hadoop.fs.MultipartUploaderBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="basePath" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a multipart uploader.
+ @param basePath file path under which all files are uploaded
+ @return a MultipartUploaderBuilder object to build the uploader
+ @throws IOException if some early checks cause IO failures.
+ @throws UnsupportedOperationException if support is checked early.]]>
+      </doc>
+    </method>
+    <method name="methodNotSupported"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Helper method that throws an {@link UnsupportedOperationException} for the
+ current {@link FileSystem} method being called.]]>
+      </doc>
+    </method>
+    <field name="statistics" type="org.apache.hadoop.fs.FileSystem.Statistics"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The statistics for this file system.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[This class provides an interface for implementors of a Hadoop file system
+ (analogous to the VFS of Unix). Applications do not access this class;
+ instead they access files across all file systems using {@link FileContext}.
+ 
+ Pathnames passed to AbstractFileSystem can be fully qualified URI that
+ matches the "this" file system (ie same scheme and authority) 
+ or a Slash-relative name that is assumed to be relative
+ to the root of the "this" file system.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.AbstractFileSystem -->
+  <!-- start class org.apache.hadoop.fs.AvroFSInput -->
+  <class name="AvroFSInput" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Closeable"/>
+    <implements name="org.apache.avro.file.SeekableInput"/>
+    <constructor name="AvroFSInput" type="org.apache.hadoop.fs.FSDataInputStream, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct given an {@link FSDataInputStream} and its length.]]>
+      </doc>
+    </constructor>
+    <constructor name="AvroFSInput" type="org.apache.hadoop.fs.FileContext, org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct given a {@link FileContext} and a {@link Path}.]]>
+      </doc>
+    </constructor>
+    <method name="length" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="read" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="seek"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="tell" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Adapts an {@link FSDataInputStream} to Avro's SeekableInput interface.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.AvroFSInput -->
+  <!-- start interface org.apache.hadoop.fs.BatchListingOperations -->
+  <interface name="BatchListingOperations"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="batchedListStatusIterator" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="paths" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Batched listing API that returns {@link PartialListing}s for the
+ passed Paths.
+
+ @param paths List of paths to list.
+ @return RemoteIterator that returns corresponding PartialListings.
+ @throws IOException failure]]>
+      </doc>
+    </method>
+    <method name="batchedListLocatedStatusIterator" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="paths" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Batched listing API that returns {@link PartialListing}s for the passed
+ Paths. The PartialListing will contain {@link LocatedFileStatus} entries
+ with locations.
+
+ @param paths List of paths to list.
+ @return RemoteIterator that returns corresponding PartialListings.
+ @throws IOException failure]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface filesystems MAY implement to offer a batched list.
+ If implemented, filesystems SHOULD declare
+ {@link CommonPathCapabilities#FS_EXPERIMENTAL_BATCH_LISTING} to be a supported
+ path capability.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.BatchListingOperations -->
+  <!-- start class org.apache.hadoop.fs.BlockLocation -->
+  <class name="BlockLocation" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Serializable"/>
+    <constructor name="BlockLocation"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default Constructor.]]>
+      </doc>
+    </constructor>
+    <constructor name="BlockLocation" type="org.apache.hadoop.fs.BlockLocation"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Copy constructor.]]>
+      </doc>
+    </constructor>
+    <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor with host, name, offset and length.]]>
+      </doc>
+    </constructor>
+    <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], long, long, boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor with host, name, offset, length and corrupt flag.]]>
+      </doc>
+    </constructor>
+    <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], java.lang.String[], long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor with host, name, network topology, offset and length.]]>
+      </doc>
+    </constructor>
+    <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], java.lang.String[], long, long, boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor with host, name, network topology, offset, length 
+ and corrupt flag.]]>
+      </doc>
+    </constructor>
+    <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], java.lang.String[], java.lang.String[], long, long, boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], java.lang.String[], java.lang.String[], java.lang.String[], org.apache.hadoop.fs.StorageType[], long, long, boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getHosts" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the list of hosts (hostname) hosting this block.]]>
+      </doc>
+    </method>
+    <method name="getCachedHosts" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of hosts (hostname) hosting a cached replica of the block.]]>
+      </doc>
+    </method>
+    <method name="getNames" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the list of names (IP:xferPort) hosting this block.]]>
+      </doc>
+    </method>
+    <method name="getTopologyPaths" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the list of network topology paths for each of the hosts.
+ The last component of the path is the "name" (IP:xferPort).]]>
+      </doc>
+    </method>
+    <method name="getStorageIds" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the storageID of each replica of the block.]]>
+      </doc>
+    </method>
+    <method name="getStorageTypes" return="org.apache.hadoop.fs.StorageType[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the storage type of each replica of the block.]]>
+      </doc>
+    </method>
+    <method name="getOffset" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the start offset of file associated with this block.]]>
+      </doc>
+    </method>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the length of the block.]]>
+      </doc>
+    </method>
+    <method name="isCorrupt" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the corrupt flag.]]>
+      </doc>
+    </method>
+    <method name="isStriped" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return true if the block is striped (erasure coded).]]>
+      </doc>
+    </method>
+    <method name="setOffset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="offset" type="long"/>
+      <doc>
+      <![CDATA[Set the start offset of file associated with this block.]]>
+      </doc>
+    </method>
+    <method name="setLength"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="length" type="long"/>
+      <doc>
+      <![CDATA[Set the length of block.]]>
+      </doc>
+    </method>
+    <method name="setCorrupt"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="corrupt" type="boolean"/>
+      <doc>
+      <![CDATA[Set the corrupt flag.]]>
+      </doc>
+    </method>
+    <method name="setHosts"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hosts" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the hosts hosting this block.]]>
+      </doc>
+    </method>
+    <method name="setCachedHosts"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cachedHosts" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Set the hosts hosting a cached replica of this block.]]>
+      </doc>
+    </method>
+    <method name="setNames"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="names" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the names (host:port) hosting this block.]]>
+      </doc>
+    </method>
+    <method name="setTopologyPaths"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="topologyPaths" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the network topology paths of the hosts.]]>
+      </doc>
+    </method>
+    <method name="setStorageIds"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="storageIds" type="java.lang.String[]"/>
+    </method>
+    <method name="setStorageTypes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="storageTypes" type="org.apache.hadoop.fs.StorageType[]"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Represents the network location of a block, information about the hosts
+ that contain block replicas, and other block metadata (E.g. the file
+ offset associated with the block, length, whether it is corrupt, etc).
+
+ For a single BlockLocation, it will have different meanings for replicated
+ and erasure coded files.
+
+ If the file is 3-replicated, offset and length of a BlockLocation represent
+ the absolute value in the file and the hosts are the 3 datanodes that
+ hold the replicas. Here is an example:
+ <pre>
+ BlockLocation(offset: 0, length: BLOCK_SIZE,
+   hosts: {"host1:9866", "host2:9866", "host3:9866"})
+ </pre>
+
+ And if the file is erasure-coded, each BlockLocation represents a logical
+ block group. Value offset is the offset of a block group in the file and
+ value length is the total length of a block group. Hosts of a BlockLocation
+ are the datanodes holding all the data blocks and parity blocks of a
+ block group.
+ Suppose we have a RS_3_2 coded file (3 data units and 2 parity units).
+ A BlockLocation example will be like:
+ <pre>
+ BlockLocation(offset: 0, length: 3 * BLOCK_SIZE, hosts: {"host1:9866",
+   "host2:9866","host3:9866","host4:9866","host5:9866"})
+ </pre>
+
+ Please refer to
+ {@link FileSystem#getFileBlockLocations(FileStatus, long, long)} or
+ {@link FileContext#getFileBlockLocations(Path, long, long)}
+ for more examples.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.BlockLocation -->
+  <!-- start interface org.apache.hadoop.fs.BlockStoragePolicySpi -->
+  <interface name="BlockStoragePolicySpi"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the name of the storage policy. Policies are uniquely
+ identified by name.
+
+ @return the name of the storage policy.]]>
+      </doc>
+    </method>
+    <method name="getStorageTypes" return="org.apache.hadoop.fs.StorageType[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the preferred storage types associated with this policy. These
+ storage types are used sequentially for successive block replicas.
+
+ @return preferred storage types used for placing block replicas.]]>
+      </doc>
+    </method>
+    <method name="getCreationFallbacks" return="org.apache.hadoop.fs.StorageType[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the fallback storage types for creating new block replicas. Fallback
+ storage types are used if the preferred storage types are not available.
+
+ @return fallback storage types for new block replicas.]]>
+      </doc>
+    </method>
+    <method name="getReplicationFallbacks" return="org.apache.hadoop.fs.StorageType[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the fallback storage types for replicating existing block replicas.
+ Fallback storage types are used if the preferred storage types are not
+ available.
+
+ @return fallback storage types for replicating existing block replicas.]]>
+      </doc>
+    </method>
+    <method name="isCopyOnCreateFile" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns true if the policy is inherit-only and cannot be changed for
+ an existing file.
+
+ @return true if the policy is inherit-only.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A storage policy specifies the placement of block replicas on specific
+ storage types.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.BlockStoragePolicySpi -->
+  <!-- start interface org.apache.hadoop.fs.ByteBufferPositionedReadable -->
+  <interface name="ByteBufferPositionedReadable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="read" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buf" type="java.nio.ByteBuffer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reads up to {@code buf.remaining()} bytes into buf from a given position
+ in the file and returns the number of bytes read. Callers should use
+ {@code buf.limit(...)} to control the size of the desired read and
+ {@code buf.position(...)} to control the offset into the buffer the data
+ should be written to.
+ <p>
+ After a successful call, {@code buf.position()} will be advanced by the
+ number of bytes read and {@code buf.limit()} will be unchanged.
+ <p>
+ In the case of an exception, the state of the buffer (the contents of the
+ buffer, the {@code buf.position()}, the {@code buf.limit()}, etc.) is
+ undefined, and callers should be prepared to recover from this
+ eventuality.
+ <p>
+ Callers should use {@link StreamCapabilities#hasCapability(String)} with
+ {@link StreamCapabilities#PREADBYTEBUFFER} to check if the underlying
+ stream supports this interface, otherwise they might get a
+ {@link UnsupportedOperationException}.
+ <p>
+ Implementations should treat 0-length requests as legitimate, and must not
+ signal an error upon their receipt.
+ <p>
+ This does not change the current offset of a file, and is thread-safe.
+
+ @param position position within file
+ @param buf the ByteBuffer to receive the results of the read operation.
+ @return the number of bytes read, possibly zero, or -1 if reached
+         end-of-stream
+ @throws IOException if there is some error performing the read]]>
+      </doc>
+    </method>
+    <method name="readFully"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buf" type="java.nio.ByteBuffer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reads {@code buf.remaining()} bytes into buf from a given position in
+ the file or until the end of the data was reached before the read
+ operation completed. Callers should use {@code buf.limit(...)} to
+ control the size of the desired read and {@code buf.position(...)} to
+ control the offset into the buffer the data should be written to.
+ <p>
+ This operation provides similar semantics to
+ {@link #read(long, ByteBuffer)}, the difference is that this method is
+ guaranteed to read data until the {@link ByteBuffer} is full, or until
+ the end of the data stream is reached.
+
+ @param position position within file
+ @param buf the ByteBuffer to receive the results of the read operation.
+ @throws IOException if there is some error performing the read
+ @throws EOFException the end of the data was reached before
+ the read operation completed
+ @see #read(long, ByteBuffer)]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Implementers of this interface provide a positioned read API that writes to a
+ {@link ByteBuffer} rather than a {@code byte[]}.
+
+ @see PositionedReadable
+ @see ByteBufferReadable]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.ByteBufferPositionedReadable -->
+  <!-- start interface org.apache.hadoop.fs.ByteBufferReadable -->
+  <interface name="ByteBufferReadable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="read" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="buf" type="java.nio.ByteBuffer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reads up to buf.remaining() bytes into buf. Callers should use
+ buf.limit(..) to control the size of the desired read.
+ <p>
+ After a successful call, {@code buf.position()} will be advanced by the
+ number of bytes read and {@code buf.limit()} will be unchanged.
+ <p>
+ In the case of an exception, the state of the buffer (the contents of the
+ buffer, the {@code buf.position()}, the {@code buf.limit()}, etc.) is
+ undefined, and callers should be prepared to recover from this
+ eventuality.
+ <p>
+ Callers should use {@link StreamCapabilities#hasCapability(String)} with
+ {@link StreamCapabilities#READBYTEBUFFER} to check if the underlying
+ stream supports this interface, otherwise they might get a
+ {@link UnsupportedOperationException}.
+ <p>
+ Implementations should treat 0-length requests as legitimate, and must not
+ signal an error upon their receipt.
+
+ @param buf
+          the ByteBuffer to receive the results of the read operation.
+ @return the number of bytes read, possibly zero, or -1 if 
+         reach end-of-stream
+ @throws IOException
+           if there is some error performing the read]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Implementers of this interface provide a read API that writes to a
+ ByteBuffer, not a byte[].]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.ByteBufferReadable -->
+  <!-- start interface org.apache.hadoop.fs.CanSetDropBehind -->
+  <interface name="CanSetDropBehind"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="setDropBehind"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dropCache" type="java.lang.Boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <doc>
+      <![CDATA[Configure whether the stream should drop the cache.
+
+ @param dropCache     Whether to drop the cache.  null means to use the
+                      default value.
+ @throws IOException  If there was an error changing the dropBehind
+                      setting.
+         UnsupportedOperationException  If this stream doesn't support
+                                        setting the drop-behind.]]>
+      </doc>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.CanSetDropBehind -->
+  <!-- start interface org.apache.hadoop.fs.CanSetReadahead -->
+  <interface name="CanSetReadahead"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="setReadahead"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="readahead" type="java.lang.Long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <doc>
+      <![CDATA[Set the readahead on this stream.
+
+ @param readahead     The readahead to use.  null means to use the default.
+ @throws IOException  If there was an error changing the dropBehind
+                      setting.
+         UnsupportedOperationException  If this stream doesn't support
+                                        setting readahead.]]>
+      </doc>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.CanSetReadahead -->
+  <!-- start interface org.apache.hadoop.fs.CanUnbuffer -->
+  <interface name="CanUnbuffer"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="unbuffer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reduce the buffering.  This will also free sockets and file descriptors
+ held by the stream, if possible.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[FSDataInputStreams implement this interface to indicate that they can clear
+ their buffers on request.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.CanUnbuffer -->
+  <!-- start class org.apache.hadoop.fs.ChecksumException -->
+  <class name="ChecksumException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ChecksumException" type="java.lang.String, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getPos" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Thrown for checksum errors.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.ChecksumException -->
+  <!-- start class org.apache.hadoop.fs.ChecksumFileSystem -->
+  <class name="ChecksumFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ChecksumFileSystem" type="org.apache.hadoop.fs.FileSystem"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getApproxChkSumLength" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="size" type="long"/>
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="setVerifyChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="verifyChecksum" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether to verify checksum.]]>
+      </doc>
+    </method>
+    <method name="setWriteChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="writeChecksum" type="boolean"/>
+    </method>
+    <method name="getRawFileSystem" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[get the raw file system]]>
+      </doc>
+    </method>
+    <method name="getChecksumFile" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Return the name of the checksum file associated with a file.]]>
+      </doc>
+    </method>
+    <method name="isChecksumFile" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Return true iff file is a checksum file name.]]>
+      </doc>
+    </method>
+    <method name="getChecksumFileLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="fileSize" type="long"/>
+      <doc>
+      <![CDATA[Return the length of the checksum file given the size of the 
+ actual file.]]>
+      </doc>
+    </method>
+    <method name="getBytesPerSum" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the bytes Per Checksum]]>
+      </doc>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+      </doc>
+    </method>
+    <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="truncate" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="newLength" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="concat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="psrcs" type="org.apache.hadoop.fs.Path[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getChecksumLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="size" type="long"/>
+      <param name="bytesPerSum" type="int"/>
+      <doc>
+      <![CDATA[Calculated the length of the checksum file in bytes.
+ @param size the length of the data file in bytes
+ @param bytesPerSum the number of bytes in a checksum block
+ @return the number of bytes in the checksum file]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createNonRecursive" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="flags" type="java.util.EnumSet"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <param name="checksumOpt" type="org.apache.hadoop.fs.Options.ChecksumOpt"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createNonRecursive" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="flags" type="java.util.EnumSet"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setPermission"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setOwner"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="username" type="java.lang.String"/>
+      <param name="groupname" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="modifyAclEntries"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="removeAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="removeAclEntries"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="removeDefaultAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setReplication" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="replication" type="short"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set replication for an existing file.
+ Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt>
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+         false if file does not exist or is a directory]]>
+      </doc>
+    </method>
+    <method name="rename" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Rename files/dirs]]>
+      </doc>
+    </method>
+    <method name="delete" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="recursive" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Implement the delete(Path, boolean) in checksum
+ file system.]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+ 
+ @param f
+          given path
+ @return the statuses of the files/directories in the given path
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="listStatusIterator" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="listLocatedStatus" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+ 
+ @param f
+          given path
+ @return the statuses of the files/directories in the given patch
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="mkdirs" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="copyFromLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="copyToLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.]]>
+      </doc>
+    </method>
+    <method name="copyToLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="copyCrc" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ If src and dst are directories, the copyCrc parameter
+ determines whether to copy CRC files.]]>
+      </doc>
+    </method>
+    <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+      <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="completeLocalOutput"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+      <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="reportChecksumFailure" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+      <param name="inPos" type="long"/>
+      <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+      <param name="sumsPos" type="long"/>
+      <doc>
+      <![CDATA[Report a checksum error to the file system.
+ @param f the file name containing the error
+ @param in the stream open on the file
+ @param inPos the position of the beginning of the bad data in the file
+ @param sums the stream open on the checksum file
+ @param sumsPos the position of the beginning of the bad data in the checksum file
+ @return if retry is necessary]]>
+      </doc>
+    </method>
+    <method name="openFile" return="org.apache.hadoop.fs.FutureDataInputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <doc>
+      <![CDATA[This is overridden to ensure that this class's
+ {@link #openFileWithOptions}() method is called, and so ultimately
+ its {@link #open(Path, int)}.
+
+ {@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="openFileWithOptions" return="java.util.concurrent.CompletableFuture"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="parameters" type="org.apache.hadoop.fs.impl.OpenFileParameters"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Open the file as a blocking call to {@link #open(Path, int)}.
+
+ {@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="createFile" return="org.apache.hadoop.fs.FSDataOutputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[This is overridden to ensure that this class's create() method is
+ ultimately called.
+
+ {@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="appendFile" return="org.apache.hadoop.fs.FSDataOutputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[This is overridden to ensure that this class's create() method is
+ ultimately called.
+
+ {@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="hasPathCapability" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="capability" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Disable those operations which the checksummed FS blocks.
+ {@inheritDoc}]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Abstract Checksumed FileSystem.
+ It provide a basic implementation of a Checksumed FileSystem,
+ which creates a checksum file for each raw file.
+ It generates &amp; verifies checksums at the client side.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.ChecksumFileSystem -->
+  <!-- start class org.apache.hadoop.fs.CommonConfigurationKeysPublic -->
+  <class name="CommonConfigurationKeysPublic" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CommonConfigurationKeysPublic"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <field name="NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY]]>
+      </doc>
+    </field>
+    <field name="FS_DEFAULT_NAME_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_DEFAULT_NAME_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for FS_DEFAULT_NAME_KEY]]>
+      </doc>
+    </field>
+    <field name="FS_DF_INTERVAL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_DF_INTERVAL_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for FS_DF_INTERVAL_KEY]]>
+      </doc>
+    </field>
+    <field name="FS_DU_INTERVAL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_DU_INTERVAL_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for FS_DU_INTERVAL_KEY]]>
+      </doc>
+    </field>
+    <field name="FS_GETSPACEUSED_CLASSNAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_GETSPACEUSED_JITTER_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_GETSPACEUSED_JITTER_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for FS_GETSPACEUSED_JITTER_KEY]]>
+      </doc>
+    </field>
+    <field name="FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY]]>
+      </doc>
+    </field>
+    <field name="NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="NET_TOPOLOGY_IMPL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="NET_DEPENDENCY_SCRIPT_FILE_NAME_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_TRASH_CHECKPOINT_INTERVAL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for FS_TRASH_CHECKPOINT_INTERVAL_KEY]]>
+      </doc>
+    </field>
+    <field name="FS_PROTECTED_DIRECTORIES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Directories that cannot be removed unless empty, even by an
+ administrator.]]>
+      </doc>
+    </field>
+    <field name="FS_LOCAL_BLOCK_SIZE_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Not used anywhere, looks like default value for FS_LOCAL_BLOCK_SIZE]]>
+      </doc>
+    </field>
+    <field name="FS_AUTOMATIC_CLOSE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_AUTOMATIC_CLOSE_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for FS_AUTOMATIC_CLOSE_KEY]]>
+      </doc>
+    </field>
+    <field name="FS_CREATION_PARALLEL_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of filesystems instances can be created in parallel.
+ <p></p>
+ A higher number here does not necessarily improve performance, especially
+ for object stores, where multiple threads may be attempting to create an FS
+ instance for the same URI.
+ <p></p>
+ Default value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="FS_CREATION_PARALLEL_COUNT_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for {@link #FS_CREATION_PARALLEL_COUNT}.
+ <p></p>
+ Default value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="FS_FILE_IMPL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_FTP_HOST_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_FTP_HOST_PORT_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_TRASH_INTERVAL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_TRASH_INTERVAL_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for FS_TRASH_INTERVAL_KEY]]>
+      </doc>
+    </field>
+    <field name="FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED.]]>
+      </doc>
+    </field>
+    <field name="IO_MAPFILE_BLOOM_SIZE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IO_MAPFILE_BLOOM_SIZE_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IO_MAPFILE_BLOOM_SIZE_KEY]]>
+      </doc>
+    </field>
+    <field name="IO_MAPFILE_BLOOM_ERROR_RATE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IO_MAPFILE_BLOOM_ERROR_RATE_DEFAULT" type="float"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IO_MAPFILE_BLOOM_ERROR_RATE_KEY]]>
+      </doc>
+    </field>
+    <field name="IO_COMPRESSION_CODEC_LZO_CLASS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Codec class that implements Lzo compression algorithm]]>
+      </doc>
+    </field>
+    <field name="IO_MAP_INDEX_INTERVAL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IO_MAP_INDEX_INTERVAL_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IO_MAP_INDEX_INTERVAL_KEY]]>
+      </doc>
+    </field>
+    <field name="IO_MAP_INDEX_SKIP_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IO_MAP_INDEX_SKIP_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IO_MAP_INDEX_SKIP_KEY]]>
+      </doc>
+    </field>
+    <field name="IO_SEQFILE_COMPRESS_BLOCKSIZE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IO_SEQFILE_COMPRESS_BLOCKSIZE_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IO_SEQFILE_COMPRESS_BLOCKSIZE_KEY]]>
+      </doc>
+    </field>
+    <field name="IO_FILE_BUFFER_SIZE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IO_FILE_BUFFER_SIZE_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IO_FILE_BUFFER_SIZE_KEY]]>
+      </doc>
+    </field>
+    <field name="IO_SKIP_CHECKSUM_ERRORS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IO_SKIP_CHECKSUM_ERRORS_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IO_SKIP_CHECKSUM_ERRORS_KEY]]>
+      </doc>
+    </field>
+    <field name="IO_SORT_MB_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Moved to mapreduce, see mapreduce.task.io.sort.mb
+ in mapred-default.xml
+ See https://issues.apache.org/jira/browse/HADOOP-6801
+
+ For {@link org.apache.hadoop.io.SequenceFile.Sorter} control
+ instead, see {@link #SEQ_IO_SORT_MB_KEY}.">
+      <doc>
+      <![CDATA[@deprecated Moved to mapreduce, see mapreduce.task.io.sort.mb
+ in mapred-default.xml
+ See https://issues.apache.org/jira/browse/HADOOP-6801
+
+ For {@link org.apache.hadoop.io.SequenceFile.Sorter} control
+ instead, see {@link #SEQ_IO_SORT_MB_KEY}.]]>
+      </doc>
+    </field>
+    <field name="IO_SORT_MB_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for {@link #IO_SORT_MB_KEY}.]]>
+      </doc>
+    </field>
+    <field name="IO_SORT_FACTOR_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Moved to mapreduce, see mapreduce.task.io.sort.factor
+ in mapred-default.xml
+ See https://issues.apache.org/jira/browse/HADOOP-6801
+
+ For {@link org.apache.hadoop.io.SequenceFile.Sorter} control
+ instead, see {@link #SEQ_IO_SORT_FACTOR_KEY}.">
+      <doc>
+      <![CDATA[@deprecated Moved to mapreduce, see mapreduce.task.io.sort.factor
+ in mapred-default.xml
+ See https://issues.apache.org/jira/browse/HADOOP-6801
+
+ For {@link org.apache.hadoop.io.SequenceFile.Sorter} control
+ instead, see {@link #SEQ_IO_SORT_FACTOR_KEY}.]]>
+      </doc>
+    </field>
+    <field name="IO_SORT_FACTOR_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for {@link #IO_SORT_FACTOR_KEY}.]]>
+      </doc>
+    </field>
+    <field name="SEQ_IO_SORT_MB_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="SEQ_IO_SORT_MB_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for {@link #SEQ_IO_SORT_MB_KEY}.]]>
+      </doc>
+    </field>
+    <field name="SEQ_IO_SORT_FACTOR_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="SEQ_IO_SORT_FACTOR_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for {@link #SEQ_IO_SORT_FACTOR_KEY}.]]>
+      </doc>
+    </field>
+    <field name="IO_SERIALIZATIONS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="TFILE_IO_CHUNK_SIZE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="TFILE_IO_CHUNK_SIZE_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for TFILE_IO_CHUNK_SIZE_KEY]]>
+      </doc>
+    </field>
+    <field name="TFILE_FS_INPUT_BUFFER_SIZE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="TFILE_FS_INPUT_BUFFER_SIZE_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for TFILE_FS_INPUT_BUFFER_SIZE_KEY]]>
+      </doc>
+    </field>
+    <field name="TFILE_FS_OUTPUT_BUFFER_SIZE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="TFILE_FS_OUTPUT_BUFFER_SIZE_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for TFILE_FS_OUTPUT_BUFFER_SIZE_KEY]]>
+      </doc>
+    </field>
+    <field name="HADOOP_CALLER_CONTEXT_ENABLED_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_CALLER_CONTEXT_ENABLED_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_CALLER_CONTEXT_MAX_SIZE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_CALLER_CONTEXT_MAX_SIZE_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_CONNECT_TIMEOUT_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IPC_CLIENT_CONNECT_TIMEOUT_KEY]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_CONNECT_MAX_RETRIES_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_KEY]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_CONNECT_RETRY_INTERVAL_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_TCPNODELAY_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_TCPNODELAY_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IPC_CLIENT_TCPNODELAY_KEY]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_LOW_LATENCY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable low-latency connections from the client]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_LOW_LATENCY_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value of IPC_CLIENT_LOW_LATENCY]]>
+      </doc>
+    </field>
+    <field name="IPC_SERVER_LISTEN_QUEUE_SIZE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IPC_SERVER_LISTEN_QUEUE_SIZE_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IPC_SERVER_LISTEN_QUEUE_SIZE_KEY]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_KILL_MAX_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_KILL_MAX_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IPC_CLIENT_KILL_MAX_KEY]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_IDLETHRESHOLD_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IPC_CLIENT_IDLETHRESHOLD_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IPC_CLIENT_IDLETHRESHOLD_KEY]]>
+      </doc>
+    </field>
+    <field name="IPC_SERVER_TCPNODELAY_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IPC_SERVER_TCPNODELAY_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IPC_SERVER_TCPNODELAY_KEY]]>
+      </doc>
+    </field>
+    <field name="IPC_SERVER_REUSEADDR_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IPC_SERVER_REUSEADDR_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IPC_SERVER_REUSEADDR_KEY.]]>
+      </doc>
+    </field>
+    <field name="IPC_SERVER_MAX_CONNECTIONS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="IPC_SERVER_MAX_CONNECTIONS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for IPC_SERVER_MAX_CONNECTIONS_KEY]]>
+      </doc>
+    </field>
+    <field name="IPC_SERVER_LOG_SLOW_RPC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Logs if a RPC is really slow compared to rest of RPCs.]]>
+      </doc>
+    </field>
+    <field name="IPC_SERVER_LOG_SLOW_RPC_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IPC_SERVER_PURGE_INTERVAL_MINUTES_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IPC_SERVER_PURGE_INTERVAL_MINUTES_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_SOCKS_SERVER_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_UTIL_HASH_TYPE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_UTIL_HASH_TYPE_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for HADOOP_UTIL_HASH_TYPE_KEY]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUP_MAPPING" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUPS_CACHE_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD_THREADS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD_THREADS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="use
+ {@link CommonConfigurationKeysPublic#HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY}
+ instead.">
+      <doc>
+      <![CDATA[@deprecated use
+ {@link CommonConfigurationKeysPublic#HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_KEY}
+ instead.]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="use
+ {@link CommonConfigurationKeysPublic#HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT}
+ instead.">
+      <doc>
+      <![CDATA[@deprecated use
+ {@link CommonConfigurationKeysPublic#HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_DEFAULT}
+ instead.]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_AUTHENTICATION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_AUTHORIZATION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_SERVICE_USER_NAME_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_AUTH_TO_LOCAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_AUTH_TO_LOCAL_MECHANISM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_DNS_INTERFACE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_DNS_NAMESERVER_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_TOKEN_FILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_TOKENS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_HTTP_AUTHENTICATION_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN]]>
+      </doc>
+    </field>
+    <field name="HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for HADOOP_KERBEROS_KEYTAB_LOGIN_AUTORENEWAL_ENABLED.]]>
+      </doc>
+    </field>
+    <field name="HADOOP_RPC_PROTECTION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Class to override Sasl Properties for a connection]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Defalt value for HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Class to override Impersonation provider]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_KEY_PROVIDER_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_KEY_DEFAULT_BITLENGTH_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_KEY_DEFAULT_BITLENGTH_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Defalt value for HADOOP_SECURITY_KEY_DEFAULT_BITLENGTH_KEY.]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_KEY_DEFAULT_CIPHER_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_KEY_DEFAULT_CIPHER_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Defalt value for HADOOP_SECURITY_KEY_DEFAULT_CIPHER_KEY.]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_ENC_KEY_CACHE_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for KMS_CLIENT_ENC_KEY_CACHE_SIZE]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT" type="float"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for KMS_CLIENT_ENC_KEY_NUM_REFILL_THREADS]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY (12 hrs)]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_TIMEOUT_SECONDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_TIMEOUT_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value is the number of providers specified.]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_FAILOVER_SLEEP_BASE_MILLIS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value is 100 ms.]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="KMS_CLIENT_FAILOVER_SLEEP_MAX_MILLIS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value is 2 secs.]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Defalt value for HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_HTTP_LOGS_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_HTTP_LOGS_ENABLED_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Defalt value for HADOOP_HTTP_LOGS_ENABLED]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_CREDENTIAL_PROVIDER_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_CREDENTIAL_CLEAR_TEXT_FALLBACK" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_CREDENTIAL_CLEAR_TEXT_FALLBACK_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_SECURITY_CREDENTIAL_PASSWORD_FILE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_SYSTEM_TAGS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Please use
+ {@link CommonConfigurationKeysPublic#HADOOP_TAGS_SYSTEM} instead
+ See https://issues.apache.org/jira/browse/HADOOP-15474">
+      <doc>
+      <![CDATA[@deprecated Please use
+ {@link CommonConfigurationKeysPublic#HADOOP_TAGS_SYSTEM} instead
+ See https://issues.apache.org/jira/browse/HADOOP-15474]]>
+      </doc>
+    </field>
+    <field name="HADOOP_CUSTOM_TAGS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Please use
+ {@link CommonConfigurationKeysPublic#HADOOP_TAGS_CUSTOM} instead
+ See https://issues.apache.org/jira/browse/HADOOP-15474">
+      <doc>
+      <![CDATA[@deprecated Please use
+ {@link CommonConfigurationKeysPublic#HADOOP_TAGS_CUSTOM} instead
+ See https://issues.apache.org/jira/browse/HADOOP-15474]]>
+      </doc>
+    </field>
+    <field name="HADOOP_TAGS_SYSTEM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_TAGS_CUSTOM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SERVICE_SHUTDOWN_TIMEOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Configuration option for the shutdown hook manager shutdown time:
+  {@value}.]]>
+      </doc>
+    </field>
+    <field name="SERVICE_SHUTDOWN_TIMEOUT_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default shutdown hook timeout: {@value} seconds.]]>
+      </doc>
+    </field>
+    <field name="HADOOP_PROMETHEUS_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_PROMETHEUS_ENABLED_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_HTTP_IDLE_TIMEOUT_MS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@see
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ core-default.xml</a>]]>
+      </doc>
+    </field>
+    <field name="HADOOP_HTTP_IDLE_TIMEOUT_MS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class contains constants for configuration keys used
+ in the common code.
+
+ It includes all publicly documented configuration keys. In general
+ this class should not be used directly (use CommonConfigurationKeys
+ instead)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.CommonConfigurationKeysPublic -->
+  <!-- start class org.apache.hadoop.fs.ContentSummary -->
+  <class name="ContentSummary" extends="org.apache.hadoop.fs.QuotaUsage"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="ContentSummary"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor deprecated by ContentSummary.Builder]]>
+      </doc>
+    </constructor>
+    <constructor name="ContentSummary" type="long, long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor, deprecated by ContentSummary.Builder
+  This constructor implicitly set spaceConsumed the same as length.
+  spaceConsumed and length must be set explicitly with
+  ContentSummary.Builder]]>
+      </doc>
+    </constructor>
+    <constructor name="ContentSummary" type="long, long, long, long, long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor, deprecated by ContentSummary.Builder]]>
+      </doc>
+    </constructor>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the length]]>
+      </doc>
+    </method>
+    <method name="getSnapshotLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDirectoryCount" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the directory count]]>
+      </doc>
+    </method>
+    <method name="getSnapshotDirectoryCount" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFileCount" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the file count]]>
+      </doc>
+    </method>
+    <method name="getSnapshotFileCount" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getSnapshotSpaceConsumed" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getErasureCodingPolicy" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="to" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHeader" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="qOption" type="boolean"/>
+      <doc>
+      <![CDATA[Return the header of the output.
+ if qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+ 
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the header of the output]]>
+      </doc>
+    </method>
+    <method name="getSnapshotHeader" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHeaderFields" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the names of the fields from the summary header.
+ 
+ @return names of fields as displayed in the header]]>
+      </doc>
+    </method>
+    <method name="getQuotaHeaderFields" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the names of the fields used in the quota summary.
+ 
+ @return names of quota fields as displayed in the header]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="qOption" type="boolean"/>
+      <doc>
+      <![CDATA[Return the string representation of the object in the output format.
+ if qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @return the string representation of the object]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="qOption" type="boolean"/>
+      <param name="hOption" type="boolean"/>
+      <doc>
+      <![CDATA[Return the string representation of the object in the output format.
+ For description of the options,
+ @see #toString(boolean, boolean, boolean, boolean, List)
+ 
+ @param qOption a flag indicating if quota needs to be printed or not
+ @param hOption a flag indicating if human readable output is to be used
+ @return the string representation of the object]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="qOption" type="boolean"/>
+      <param name="hOption" type="boolean"/>
+      <param name="xOption" type="boolean"/>
+      <doc>
+      <![CDATA[Return the string representation of the object in the output format.
+ For description of the options,
+ @see #toString(boolean, boolean, boolean, boolean, List)
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @param hOption a flag indicating if human readable output is to be used
+ @param xOption a flag indicating if calculation from snapshots is to be
+                included in the output
+ @return the string representation of the object]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="qOption" type="boolean"/>
+      <param name="hOption" type="boolean"/>
+      <param name="tOption" type="boolean"/>
+      <param name="types" type="java.util.List"/>
+      <doc>
+      <![CDATA[Return the string representation of the object in the output format.
+ For description of the options,
+ @see #toString(boolean, boolean, boolean, boolean, List)
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @param hOption a flag indicating if human readable output is to be used
+ @param tOption a flag indicating if display quota by storage types
+ @param types Storage types to display
+ @return the string representation of the object]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="qOption" type="boolean"/>
+      <param name="hOption" type="boolean"/>
+      <param name="tOption" type="boolean"/>
+      <param name="xOption" type="boolean"/>
+      <param name="types" type="java.util.List"/>
+      <doc>
+      <![CDATA[Return the string representation of the object in the output format.
+ if qOption is false, output directory count, file count, and content size;
+ if qOption is true, output quota and remaining quota as well.
+ if hOption is false, file sizes are returned in bytes
+ if hOption is true, file sizes are returned in human readable format
+ if tOption is true, display the quota by storage types
+ if tOption is false, same logic with #toString(boolean,boolean)
+ if xOption is false, output includes the calculation from snapshots
+ if xOption is true, output excludes the calculation from snapshots
+
+ @param qOption a flag indicating if quota needs to be printed or not
+ @param hOption a flag indicating if human readable output is to be used
+ @param tOption a flag indicating if display quota by storage types
+ @param xOption a flag indicating if calculation from snapshots is to be
+                included in the output
+ @param types Storage types to display
+ @return the string representation of the object]]>
+      </doc>
+    </method>
+    <method name="toSnapshot" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hOption" type="boolean"/>
+      <doc>
+      <![CDATA[Return the string representation of the snapshot counts in the output
+ format.
+ @param hOption flag indicating human readable or not
+ @return String representation of the snapshot counts]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Store the summary of a content (a directory or a file).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.ContentSummary -->
+  <!-- start class org.apache.hadoop.fs.CreateFlag -->
+  <class name="CreateFlag" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.fs.CreateFlag[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.fs.CreateFlag"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="validate"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="flag" type="java.util.EnumSet"/>
+      <doc>
+      <![CDATA[Validate the CreateFlag and throw exception if it is invalid
+ @param flag set of CreateFlag
+ @throws HadoopIllegalArgumentException if the CreateFlag is invalid]]>
+      </doc>
+    </method>
+    <method name="validate"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="java.lang.Object"/>
+      <param name="pathExists" type="boolean"/>
+      <param name="flag" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Validate the CreateFlag for create operation
+ @param path Object representing the path; usually String or {@link Path}
+ @param pathExists pass true if the path exists in the file system
+ @param flag set of CreateFlag
+ @throws IOException on error
+ @throws HadoopIllegalArgumentException if the CreateFlag is invalid]]>
+      </doc>
+    </method>
+    <method name="validateForAppend"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="flag" type="java.util.EnumSet"/>
+      <doc>
+      <![CDATA[Validate the CreateFlag for the append operation. The flag must contain
+ APPEND, and cannot contain OVERWRITE.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[CreateFlag specifies the file create semantic. Users can combine flags like: <br>
+ <code>
+ EnumSet.of(CreateFlag.CREATE, CreateFlag.APPEND)
+ </code>
+ <p>
+ 
+ Use the CreateFlag as follows:
+ <ol>
+ <li> CREATE - to create a file if it does not exist, 
+ else throw FileAlreadyExists.</li>
+ <li> APPEND - to append to a file if it exists, 
+ else throw FileNotFoundException.</li>
+ <li> OVERWRITE - to truncate a file if it exists, 
+ else throw FileNotFoundException.</li>
+ <li> CREATE|APPEND - to create a file if it does not exist, 
+ else append to an existing file.</li>
+ <li> CREATE|OVERWRITE - to create a file if it does not exist, 
+ else overwrite an existing file.</li>
+ <li> SYNC_BLOCK - to force closed blocks to the disk device.
+ In addition {@link Syncable#hsync()} should be called after each write,
+ if true synchronous behavior is required.</li>
+ <li> LAZY_PERSIST - Create the block on transient storage (RAM) if
+ available.</li>
+ <li> APPEND_NEWBLOCK - Append data to a new block instead of end of the last
+ partial block.</li>
+ </ol>
+ 
+ Following combinations are not valid and will result in
+ {@link HadoopIllegalArgumentException}:
+ <ol>
+ <li> APPEND|OVERWRITE</li>
+ <li> CREATE|APPEND|OVERWRITE</li>
+ </ol>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.CreateFlag -->
+  <!-- start class org.apache.hadoop.fs.FileAlreadyExistsException -->
+  <class name="FileAlreadyExistsException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileAlreadyExistsException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileAlreadyExistsException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Used when target file already exists for any operation and 
+ is not configured to be overwritten.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FileAlreadyExistsException -->
+  <!-- start class org.apache.hadoop.fs.FileChecksum -->
+  <class name="FileChecksum" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="FileChecksum"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAlgorithmName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The checksum algorithm name]]>
+      </doc>
+    </method>
+    <method name="getLength" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The length of the checksum in bytes]]>
+      </doc>
+    </method>
+    <method name="getBytes" return="byte[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The value of the checksum in bytes]]>
+      </doc>
+    </method>
+    <method name="getChecksumOpt" return="org.apache.hadoop.fs.Options.ChecksumOpt"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Return true if both the algorithms and the values are the same.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[An abstract class representing file checksums for files.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FileChecksum -->
+  <!-- start class org.apache.hadoop.fs.FileContext -->
+  <class name="FileContext" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.fs.PathCapabilities"/>
+    <method name="getFSofPath" return="org.apache.hadoop.fs.AbstractFileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="absOrFqPath" type="org.apache.hadoop.fs.Path"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the file system of supplied path.
+ 
+ @param absOrFqPath - absolute or fully qualified path
+ @return the file system of the path
+ 
+ @throws UnsupportedFileSystemException If the file system for
+           <code>absOrFqPath</code> is not supported.
+ @throws IOException If the file system for <code>absOrFqPath</code> could
+         not be instantiated.]]>
+      </doc>
+    </method>
+    <method name="getFileContext" return="org.apache.hadoop.fs.FileContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defFS" type="org.apache.hadoop.fs.AbstractFileSystem"/>
+      <param name="aConf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Create a FileContext with specified FS as default using the specified
+ config.
+ 
+ @param defFS
+ @param aConf
+ @return new FileContext with specified FS as default.]]>
+      </doc>
+    </method>
+    <method name="getFileContext" return="org.apache.hadoop.fs.FileContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="defaultFS" type="org.apache.hadoop.fs.AbstractFileSystem"/>
+      <doc>
+      <![CDATA[Create a FileContext for specified file system using the default config.
+ 
+ @param defaultFS
+ @return a FileContext with the specified AbstractFileSystem
+                 as the default FS.]]>
+      </doc>
+    </method>
+    <method name="getFileContext" return="org.apache.hadoop.fs.FileContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <doc>
+      <![CDATA[Create a FileContext using the default config read from the
+ $HADOOP_CONFIG/core.xml, Unspecified key-values for config are defaulted
+ from core-defaults.xml in the release jar.
+ 
+ @throws UnsupportedFileSystemException If the file system from the default
+           configuration is not supported]]>
+      </doc>
+    </method>
+    <method name="getLocalFSFileContext" return="org.apache.hadoop.fs.FileContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <doc>
+      <![CDATA[@return a FileContext for the local file system using the default config.
+ @throws UnsupportedFileSystemException If the file system for
+           {@link FsConstants#LOCAL_FS_URI} is not supported.]]>
+      </doc>
+    </method>
+    <method name="getFileContext" return="org.apache.hadoop.fs.FileContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultFsUri" type="java.net.URI"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <doc>
+      <![CDATA[Create a FileContext for specified URI using the default config.
+ 
+ @param defaultFsUri
+ @return a FileContext with the specified URI as the default FS.
+ 
+ @throws UnsupportedFileSystemException If the file system for
+           <code>defaultFsUri</code> is not supported]]>
+      </doc>
+    </method>
+    <method name="getFileContext" return="org.apache.hadoop.fs.FileContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultFsUri" type="java.net.URI"/>
+      <param name="aConf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <doc>
+      <![CDATA[Create a FileContext for specified default URI using the specified config.
+ 
+ @param defaultFsUri
+ @param aConf
+ @return new FileContext for specified uri
+ @throws UnsupportedFileSystemException If the file system with specified is
+           not supported
+ @throws RuntimeException If the file system specified is supported but
+         could not be instantiated, or if login fails.]]>
+      </doc>
+    </method>
+    <method name="getFileContext" return="org.apache.hadoop.fs.FileContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="aConf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <doc>
+      <![CDATA[Create a FileContext using the passed config. Generally it is better to use
+ {@link #getFileContext(URI, Configuration)} instead of this one.
+ 
+ 
+ @param aConf
+ @return new FileContext
+ @throws UnsupportedFileSystemException If file system in the config
+           is not supported]]>
+      </doc>
+    </method>
+    <method name="getLocalFSFileContext" return="org.apache.hadoop.fs.FileContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="aConf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <doc>
+      <![CDATA[@param aConf - from which the FileContext is configured
+ @return a FileContext for the local file system using the specified config.
+ 
+ @throws UnsupportedFileSystemException If default file system in the config
+           is not supported]]>
+      </doc>
+    </method>
+    <method name="setWorkingDirectory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newWDir" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the working directory for wd-relative names (such as "foo/bar"). Working
+ directory feature is provided by simply prefixing relative names with the
+ working dir. Note this is different from Unix where the wd is actually set
+ to the inode. Hence setWorkingDir does not follow symlinks etc. This works
+ better in a distributed environment that has multiple independent roots.
+ {@link #getWorkingDirectory()} should return what setWorkingDir() set.
+ 
+ @param newWDir new working directory
+ @throws IOException 
+ <br>
+           NewWdir can be one of:
+           <ul>
+           <li>relative path: "foo/bar";</li>
+           <li>absolute without scheme: "/foo/bar"</li>
+           <li>fully qualified with scheme: "xx://auth/foo/bar"</li>
+           </ul>
+ <br>
+           Illegal WDs:
+           <ul>
+           <li>relative with scheme: "xx:foo/bar"</li>
+           <li>non existent directory</li>
+           </ul>]]>
+      </doc>
+    </method>
+    <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gets the working directory for wd-relative names (such as "foo/bar").]]>
+      </doc>
+    </method>
+    <method name="getUgi" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gets the ugi in the file-context
+ @return UserGroupInformation]]>
+      </doc>
+    </method>
+    <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the current user's home directory in this file system.
+ The default implementation returns "/user/$USER/".
+ @return the home directory]]>
+      </doc>
+    </method>
+    <method name="getUMask" return="org.apache.hadoop.fs.permission.FsPermission"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the umask of this FileContext]]>
+      </doc>
+    </method>
+    <method name="setUMask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newUmask" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <doc>
+      <![CDATA[Set umask to the supplied parameter.
+ @param newUmask  the new umask]]>
+      </doc>
+    </method>
+    <method name="resolvePath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Resolve the path following any symlinks or mount points
+ @param f to be resolved
+ @return fully qualified resolved path
+ 
+ @throws FileNotFoundException  If <code>f</code> does not exist
+ @throws AccessControlException if access denied
+ @throws IOException If an IO Error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws
+           undeclared exception to RPC server
+ 
+ RuntimeExceptions:
+ @throws InvalidPathException If path <code>f</code> is not valid]]>
+      </doc>
+    </method>
+    <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Make the path fully qualified if it isn't. 
+ A Fully-qualified path has scheme and authority specified and an absolute
+ path.
+ Use the default file system and working dir in this FileContext to qualify.
+ @param path
+ @return qualified path]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="createFlag" type="java.util.EnumSet"/>
+      <param name="opts" type="org.apache.hadoop.fs.Options.CreateOpts[]"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create or overwrite file on indicated path and returns an output stream for
+ writing into the file.
+ 
+ @param f the file name to open
+ @param createFlag gives the semantics of create; see {@link CreateFlag}
+ @param opts file creation options; see {@link Options.CreateOpts}.
+          <ul>
+          <li>Progress - to report progress on the operation - default null
+          <li>Permission - umask is applied against permission: default is
+          FsPermissions:getDefault()
+ 
+          <li>CreateParent - create missing parent path; default is to not
+          to create parents
+          <li>The defaults for the following are SS defaults of the file
+          server implementing the target path. Not all parameters make sense
+          for all kinds of file system - eg. localFS ignores Blocksize,
+          replication, checksum
+          <ul>
+          <li>BufferSize - buffersize used in FSDataOutputStream
+          <li>Blocksize - block size for file blocks
+          <li>ReplicationFactor - replication for blocks
+          <li>ChecksumParam - Checksum parameters. server default is used
+          if not specified.
+          </ul>
+          </ul>
+ 
+ @return {@link FSDataOutputStream} for created file
+ 
+ @throws AccessControlException If access is denied
+ @throws FileAlreadyExistsException If file <code>f</code> already exists
+ @throws FileNotFoundException If parent of <code>f</code> does not exist
+           and <code>createParent</code> is false
+ @throws ParentNotDirectoryException If parent of <code>f</code> is not a
+           directory.
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws
+           undeclared exception to RPC server
+ 
+ RuntimeExceptions:
+ @throws InvalidPathException If path <code>f</code> is not valid]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a {@link FSDataOutputStreamBuilder} for creating or overwriting
+ a file on indicated path.
+
+ @param f the file path to create builder for.
+ @return {@link FSDataOutputStreamBuilder} to build a
+         {@link FSDataOutputStream}.
+
+ Upon {@link FSDataOutputStreamBuilder#build()} being invoked,
+ builder parameters will be verified by {@link FileContext} and
+ {@link AbstractFileSystem#create}. And filesystem states will be modified.
+
+ Client should expect {@link FSDataOutputStreamBuilder#build()} throw the
+ same exceptions as create(Path, EnumSet, CreateOpts...).]]>
+      </doc>
+    </method>
+    <method name="mkdir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="createParent" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Make(create) a directory and all the non-existent parents.
+ 
+ @param dir - the dir to make
+ @param permission - permissions is set permission{@literal &~}umask
+ @param createParent - if true then missing parent dirs are created if false
+          then parent must exist
+ 
+ @throws AccessControlException If access is denied
+ @throws FileAlreadyExistsException If directory <code>dir</code> already
+           exists
+ @throws FileNotFoundException If parent of <code>dir</code> does not exist
+           and <code>createParent</code> is false
+ @throws ParentNotDirectoryException If parent of <code>dir</code> is not a
+           directory
+ @throws UnsupportedFileSystemException If file system for <code>dir</code>
+         is not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server
+ 
+ RuntimeExceptions:
+ @throws InvalidPathException If path <code>dir</code> is not valid]]>
+      </doc>
+    </method>
+    <method name="delete" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="recursive" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete a file.
+ @param f the path to delete.
+ @param recursive if path is a directory and set to 
+ true, the directory is deleted else throws an exception. In
+ case of a file the recursive can be set to either true or false.
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server
+ 
+ RuntimeExceptions:
+ @throws InvalidPathException If path <code>f</code> is invalid]]>
+      </doc>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Opens an FSDataInputStream at the indicated Path using
+ default buffersize.
+ @param f the file name to open
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If file <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code>
+         is not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ 
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.
+ 
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If file <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="truncate" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="newLength" type="long"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Truncate the file in the indicated path to the indicated size.
+ <ul>
+ <li>Fails if path is a directory.
+ <li>Fails if path does not exist.
+ <li>Fails if path is not closed.
+ <li>Fails if new size is greater than current size.
+ </ul>
+ @param f The path to the file to be truncated
+ @param newLength The size the file is to be truncated to
+
+ @return <code>true</code> if the file has been truncated to the desired
+ <code>newLength</code> and is immediately available to be reused for
+ write operations such as <code>append</code>, or
+ <code>false</code> if a background process of adjusting the length of
+ the last block has been started, and clients should wait for it to
+ complete before proceeding with further file updates.
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If file <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred
+
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="setReplication" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="replication" type="short"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set replication for an existing file.
+ 
+ @param f file name
+ @param replication new replication
+
+ @return true if successful
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If file <code>f</code> does not exist
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="rename"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Renames Path src to Path dst
+ <ul>
+ <li>Fails if src is a file and dst is a directory.
+ <li>Fails if src is a directory and dst is a file.
+ <li>Fails if the parent of dst does not exist or is a file.
+ </ul>
+ <p>
+ If OVERWRITE option is not passed as an argument, rename fails if the dst
+ already exists.
+ <p>
+ If OVERWRITE option is passed as an argument, rename overwrites the dst if
+ it is a file or an empty directory. Rename fails if dst is a non-empty
+ directory.
+ <p>
+ Note that atomicity of rename is dependent on the file system
+ implementation. Please refer to the file system documentation for details
+ <p>
+ 
+ @param src path to be renamed
+ @param dst new path after rename
+ 
+ @throws AccessControlException If access is denied
+ @throws FileAlreadyExistsException If <code>dst</code> already exists and
+           <code>options</code> has {@link Options.Rename#OVERWRITE}
+           option false.
+ @throws FileNotFoundException If <code>src</code> does not exist
+ @throws ParentNotDirectoryException If parent of <code>dst</code> is not a
+           directory
+ @throws UnsupportedFileSystemException If file system for <code>src</code>
+           and <code>dst</code> is not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="setPermission"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set permission of a path.
+ @param f the path
+ @param permission - the new absolute permission (umask is not applied)
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code>
+         is not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="setOwner"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="username" type="java.lang.String"/>
+      <param name="groupname" type="java.lang.String"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set owner of a path (i.e. a file or a directory). The parameters username
+ and groupname cannot both be null.
+ 
+ @param f The path
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.
+ 
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server
+ 
+ RuntimeExceptions:
+ @throws HadoopIllegalArgumentException If <code>username</code> or
+           <code>groupname</code> is invalid.]]>
+      </doc>
+    </method>
+    <method name="setTimes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="mtime" type="long"/>
+      <param name="atime" type="long"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set access time of a file.
+ @param f The path
+ @param mtime Set the modification time of this file.
+        The number of milliseconds since epoch (Jan 1, 1970). 
+        A value of -1 means that this call should not set modification time.
+ @param atime Set the access time of this file.
+        The number of milliseconds since Jan 1, 1970. 
+        A value of -1 means that this call should not set access time.
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the checksum of a file.
+
+ @param f file path
+
+ @return The file checksum.  The default return value is null,
+  which indicates that no checksum algorithm is implemented
+  in the corresponding FileSystem.
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If <code>f</code> does not exist
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="setVerifyChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="verifyChecksum" type="boolean"/>
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the verify checksum flag for the  file system denoted by the path.
+ This is only applicable if the 
+ corresponding FileSystem supports checksum. By default doesn't do anything.
+ @param verifyChecksum the verify checksum flag to set
+ @param f set the verifyChecksum for the Filesystem containing this path
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a file status object that represents the path.
+ @param f The path we want information from
+
+ @return a FileStatus object
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="msync"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <doc>
+      <![CDATA[Synchronize client metadata state.
+
+ @throws IOException
+ @throws UnsupportedOperationException]]>
+      </doc>
+    </method>
+    <method name="getFileLinkStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a file status object that represents the path. If the path 
+ refers to a symlink then the FileStatus of the symlink is returned.
+ The behavior is equivalent to #getFileStatus() if the underlying
+ file system does not support symbolic links.
+ @param  f The path we want information from.
+ @return A FileStatus object
+ 
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred]]>
+      </doc>
+    </method>
+    <method name="getLinkTarget" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the target of the given symbolic link as it was specified
+ when the link was created.  Links in the path leading up to the
+ final path component are resolved transparently.
+
+ @param f the path to return the target of
+ @return The un-interpreted target of the symbolic link.
+ 
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If path <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If the given path does not refer to a symlink
+           or an I/O error occurred]]>
+      </doc>
+    </method>
+    <method name="getFsStatus" return="org.apache.hadoop.fs.FsStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns a status object describing the use and capacity of the
+ file system denoted by the Path argument f.
+ If the file system has multiple partitions, the
+ use and capacity of the partition pointed to by the specified
+ path is reflected.
+ 
+ @param f Path for which status should be obtained. null means the
+ root partition of the default file system. 
+
+ @return a FsStatus object
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="createSymlink"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="target" type="org.apache.hadoop.fs.Path"/>
+      <param name="link" type="org.apache.hadoop.fs.Path"/>
+      <param name="createParent" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Creates a symbolic link to an existing file. An exception is thrown if 
+ the symlink exists, the user does not have permission to create symlink,
+ or the underlying file system does not support symlinks.
+ 
+ Symlink permissions are ignored, access to a symlink is determined by
+ the permissions of the symlink target.
+ 
+ Symlinks in paths leading up to the final path component are resolved 
+ transparently. If the final path component refers to a symlink some 
+ functions operate on the symlink itself, these are:
+ - delete(f) and deleteOnExit(f) - Deletes the symlink.
+ - rename(src, dst) - If src refers to a symlink, the symlink is 
+   renamed. If dst refers to a symlink, the symlink is over-written.
+ - getLinkTarget(f) - Returns the target of the symlink. 
+ - getFileLinkStatus(f) - Returns a FileStatus object describing
+   the symlink.
+ Some functions, create() and mkdir(), expect the final path component
+ does not exist. If they are given a path that refers to a symlink that 
+ does exist they behave as if the path referred to an existing file or 
+ directory. All other functions fully resolve, ie follow, the symlink. 
+ These are: open, setReplication, setOwner, setTimes, setWorkingDirectory,
+ setPermission, getFileChecksum, setVerifyChecksum, getFileBlockLocations,
+ getFsStatus, getFileStatus, exists, and listStatus.
+ 
+ Symlink targets are stored as given to createSymlink, assuming the 
+ underlying file system is capable of storing a fully qualified URI.
+ Dangling symlinks are permitted. FileContext supports four types of 
+ symlink targets, and resolves them as follows
+ <pre>
+ Given a path referring to a symlink of form:
+ 
+   {@literal <---}X{@literal --->}
+   fs://host/A/B/link 
+   {@literal <-----}Y{@literal ----->}
+ 
+ In this path X is the scheme and authority that identify the file system,
+ and Y is the path leading up to the final path component "link". If Y is
+ a symlink  itself then let Y' be the target of Y and X' be the scheme and
+ authority of Y'. Symlink targets may be:
+ 
+ 1. Fully qualified URIs
+ 
+ fs://hostX/A/B/file  Resolved according to the target file system.
+ 
+ 2. Partially qualified URIs (eg scheme but no host)
+ 
+ fs:///A/B/file  Resolved according to the target file system. Eg resolving
+                 a symlink to hdfs:///A results in an exception because
+                 HDFS URIs must be fully qualified, while a symlink to 
+                 file:///A will not since Hadoop's local file systems 
+                 require partially qualified URIs.
+ 
+ 3. Relative paths
+ 
+ path  Resolves to [Y'][path]. Eg if Y resolves to hdfs://host/A and path 
+       is "../B/file" then [Y'][path] is hdfs://host/B/file
+ 
+ 4. Absolute paths
+ 
+ path  Resolves to [X'][path]. Eg if Y resolves to hdfs://host/A/B and path
+       is "/file" then [X'][path] is hdfs://host/file
+ </pre>
+ 
+ @param target the target of the symbolic link
+ @param link the path to be created that points to target
+ @param createParent if true then missing parent dirs are created if 
+                     false then parent must exist
+
+
+ @throws AccessControlException If access is denied
+ @throws FileAlreadyExistsException If file <code>link</code> already exists
+ @throws FileNotFoundException If <code>target</code> does not exist
+ @throws ParentNotDirectoryException If parent of <code>link</code> is not a
+           directory.
+ @throws UnsupportedFileSystemException If file system for 
+           <code>target</code> or <code>link</code> is not supported
+ @throws IOException If an I/O error occurred]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+ 
+ @param f is the path
+
+ @return an iterator that traverses statuses of the files/directories 
+         in the given path
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="listCorruptFileBlocks" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@return an iterator over the corrupt files under the given path
+ (may contain duplicates if a file has more than one corrupt block)
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="listLocatedStatus" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory. 
+ Return the file's status and block locations if the path is a file.
+ 
+ If a returned status is a file, it contains the file's block locations.
+
+ @param f is the path
+
+ @return an iterator that traverses statuses of the files/directories 
+         in the given path
+ If any IO exception (for example the input directory gets deleted while
+ listing is being executed), next() or hasNext() of the returned iterator
+ may throw a RuntimeException with the io exception as the cause.
+
+ @throws AccessControlException If access is denied
+ @throws FileNotFoundException If <code>f</code> does not exist
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="deleteOnExit" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Mark a path to be deleted on JVM shutdown.
+ 
+ @param f the existing path to delete.
+
+ @return  true if deleteOnExit is successful, otherwise false.
+
+ @throws AccessControlException If access is denied
+ @throws UnsupportedFileSystemException If file system for <code>f</code> is
+           not supported
+ @throws IOException If an I/O error occurred
+ 
+ Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws 
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="util" return="org.apache.hadoop.fs.FileContext.Util"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="resolve" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnresolvedLinkException" type="org.apache.hadoop.fs.UnresolvedLinkException"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Resolves all symbolic links in the specified path.
+ Returns the new path object.]]>
+      </doc>
+    </method>
+    <method name="resolveIntermediate" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Resolves all symbolic links in the specified path leading up 
+ to, but not including the final path component.
+ @param f path to resolve
+ @return the new path object.]]>
+      </doc>
+    </method>
+    <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <doc>
+      <![CDATA[Get the statistics for a particular file system
+ 
+ @param uri
+          the uri to lookup the statistics. Only scheme and authority part
+          of the uri are used as the key to store and lookup.
+ @return a statistics object]]>
+      </doc>
+    </method>
+    <method name="clearStatistics"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Clears all the statistics stored in AbstractFileSystem, for all the file
+ systems.]]>
+      </doc>
+    </method>
+    <method name="printStatistics"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Prints the statistics to standard output. File System is identified by the
+ scheme and authority.]]>
+      </doc>
+    </method>
+    <method name="getAllStatistics" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Map of uri and statistics for each filesystem instantiated. The uri
+         consists of scheme and authority for the filesystem.]]>
+      </doc>
+    </method>
+    <method name="modifyAclEntries"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Modifies ACL entries of files and directories.  This method can add new ACL
+ entries or modify the permissions on existing ACL entries.  All existing
+ ACL entries that are not specified in this call are retained without
+ changes.  (Modifications are merged into the current ACL.)
+
+ @param path Path to modify
+ @param aclSpec List{@literal <}AclEntry{@literal >} describing
+ modifications
+ @throws IOException if an ACL could not be modified]]>
+      </doc>
+    </method>
+    <method name="removeAclEntries"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Removes ACL entries from files and directories.  Other ACL entries are
+ retained.
+
+ @param path Path to modify
+ @param aclSpec List{@literal <}AclEntry{@literal >} describing entries
+ to remove
+ @throws IOException if an ACL could not be modified]]>
+      </doc>
+    </method>
+    <method name="removeDefaultAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Removes all default ACL entries from files and directories.
+
+ @param path Path to modify
+ @throws IOException if an ACL could not be modified]]>
+      </doc>
+    </method>
+    <method name="removeAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Removes all but the base ACL entries of files and directories.  The entries
+ for user, group, and others are retained for compatibility with permission
+ bits.
+
+ @param path Path to modify
+ @throws IOException if an ACL could not be removed]]>
+      </doc>
+    </method>
+    <method name="setAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Fully replaces ACL of files and directories, discarding all existing
+ entries.
+
+ @param path Path to modify
+ @param aclSpec List{@literal <}AclEntry{@literal >} describing
+ modifications, must include entries for user, group, and others for
+ compatibility with permission bits.
+ @throws IOException if an ACL could not be modified]]>
+      </doc>
+    </method>
+    <method name="getAclStatus" return="org.apache.hadoop.fs.permission.AclStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets the ACLs of files and directories.
+
+ @param path Path to get
+ @return AclStatus describing the ACL entries of the file or directory
+ @throws IOException if an ACL could not be read]]>
+      </doc>
+    </method>
+    <method name="setXAttr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="byte[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set an xattr of a file or directory.
+ The name must be prefixed with the namespace followed by ".". For example,
+ "user.attr".
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to modify
+ @param name xattr name.
+ @param value xattr value.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setXAttr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="byte[]"/>
+      <param name="flag" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set an xattr of a file or directory.
+ The name must be prefixed with the namespace followed by ".". For example,
+ "user.attr".
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to modify
+ @param name xattr name.
+ @param value xattr value.
+ @param flag xattr set flag
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getXAttr" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get an xattr for a file or directory.
+ The name must be prefixed with the namespace followed by ".". For example,
+ "user.attr".
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to get extended attribute
+ @param name xattr name.
+ @return byte[] xattr value.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getXAttrs" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get all of the xattrs for a file or directory.
+ Only those xattrs for which the logged-in user has permissions to view
+ are returned.
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to get extended attributes
+ @return Map{@literal <}String, byte[]{@literal >} describing the XAttrs
+ of the file or directory
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getXAttrs" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="names" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get all of the xattrs for a file or directory.
+ Only those xattrs for which the logged-in user has permissions to view
+ are returned.
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to get extended attributes
+ @param names XAttr names.
+ @return Map{@literal <}String, byte[]{@literal >} describing the XAttrs
+ of the file or directory
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="removeXAttr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Remove an xattr of a file or directory.
+ The name must be prefixed with the namespace followed by ".". For example,
+ "user.attr".
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to remove extended attribute
+ @param name xattr name
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="listXAttrs" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get all of the xattr names for a file or directory.
+ Only those xattr names which the logged-in user has permissions to view
+ are returned.
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to get extended attributes
+ @return List{@literal <}String{@literal >} of the XAttr names of the
+ file or directory
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createSnapshot" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a snapshot with a default name.
+
+ @param path The directory where snapshots will be taken.
+ @return the snapshot path.
+
+ @throws IOException If an I/O error occurred
+
+ <p>Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="createSnapshot" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="snapshotName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a snapshot.
+
+ @param path The directory where snapshots will be taken.
+ @param snapshotName The name of the snapshot
+ @return the snapshot path.
+
+ @throws IOException If an I/O error occurred
+
+ <p>Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="renameSnapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="snapshotOldName" type="java.lang.String"/>
+      <param name="snapshotNewName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Rename a snapshot.
+
+ @param path The directory path where the snapshot was taken
+ @param snapshotOldName Old name of the snapshot
+ @param snapshotNewName New name of the snapshot
+
+ @throws IOException If an I/O error occurred
+
+ <p>Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="deleteSnapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="snapshotName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete a snapshot of a directory.
+
+ @param path The directory that the to-be-deleted snapshot belongs to
+ @param snapshotName The name of the snapshot
+
+ @throws IOException If an I/O error occurred
+
+ <p>Exceptions applicable to file systems accessed over RPC:
+ @throws RpcClientException If an exception occurred in the RPC client
+ @throws RpcServerException If an exception occurred in the RPC server
+ @throws UnexpectedServerException If server implementation throws
+           undeclared exception to RPC server]]>
+      </doc>
+    </method>
+    <method name="satisfyStoragePolicy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the source path to satisfy storage policy.
+ @param path The source path referring to either a directory or a file.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setStoragePolicy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="policyName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the storage policy for a given file or directory.
+
+ @param path file or directory path.
+ @param policyName the name of the target storage policy. The list
+                   of supported Storage policies can be retrieved
+                   via {@link #getAllStoragePolicies}.]]>
+      </doc>
+    </method>
+    <method name="unsetStoragePolicy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unset the storage policy set for a given file or directory.
+ @param src file or directory path.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getStoragePolicy" return="org.apache.hadoop.fs.BlockStoragePolicySpi"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Query the effective storage policy ID for the given file or directory.
+
+ @param path file or directory path.
+ @return storage policy for give file.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAllStoragePolicies" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Retrieve all the storage policies supported by this file system.
+
+ @return all storage policies supported by this filesystem.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="openFile" return="org.apache.hadoop.fs.FutureDataInputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <doc>
+      <![CDATA[Open a file for reading through a builder API.
+ Ultimately calls {@link #open(Path, int)} unless a subclass
+ executes the open command differently.
+
+ The semantics of this call are therefore the same as that of
+ {@link #open(Path, int)} with one special point: it is in
+ {@code FSDataInputStreamBuilder.build()} in which the open operation
+ takes place -it is there where all preconditions to the operation
+ are checked.
+ @param path file path
+ @return a FSDataInputStreamBuilder object to build the input stream
+ @throws IOException if some early checks cause IO failures.
+ @throws UnsupportedOperationException if support is checked early.]]>
+      </doc>
+    </method>
+    <method name="hasPathCapability" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="capability" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the path capabilities of the bonded {@code AbstractFileSystem}.
+ @param path path to query the capability of.
+ @param capability string to query the stream support for.
+ @return true iff the capability is supported under that FS.
+ @throws IOException path resolution or other IO failure
+ @throws IllegalArgumentException invalid arguments]]>
+      </doc>
+    </method>
+    <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a set of server default configuration values based on path.
+ @param path path to fetch server defaults
+ @return server default configuration values for path
+ @throws IOException an I/O error occurred]]>
+      </doc>
+    </method>
+    <method name="createMultipartUploader" return="org.apache.hadoop.fs.MultipartUploaderBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="basePath" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a multipart uploader.
+ @param basePath file path under which all files are uploaded
+ @return a MultipartUploaderBuilder object to build the uploader
+ @throws IOException if some early checks cause IO failures.
+ @throws UnsupportedOperationException if support is checked early.]]>
+      </doc>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_PERM" type="org.apache.hadoop.fs.permission.FsPermission"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default permission for directory and symlink
+ In previous versions, this default permission was also used to
+ create files, so files created end up with ugo+x permission.
+ See HADOOP-9155 for detail. 
+ Two new constants are added to solve this, please use 
+ {@link FileContext#DIR_DEFAULT_PERM} for directory, and use
+ {@link FileContext#FILE_DEFAULT_PERM} for file.
+ This constant is kept for compatibility.]]>
+      </doc>
+    </field>
+    <field name="DIR_DEFAULT_PERM" type="org.apache.hadoop.fs.permission.FsPermission"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default permission for directory]]>
+      </doc>
+    </field>
+    <field name="FILE_DEFAULT_PERM" type="org.apache.hadoop.fs.permission.FsPermission"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default permission for file]]>
+      </doc>
+    </field>
+    <field name="SHUTDOWN_HOOK_PRIORITY" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Priority of the FileContext shutdown hook.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[The FileContext class provides an interface for users of the Hadoop
+ file system. It exposes a number of file system operations, e.g. create,
+ open, list.
+ 
+ <h2>Path Names</h2>
+ 
+ The Hadoop file system supports a URI namespace and URI names. This enables
+ multiple types of file systems to be referenced using fully-qualified URIs.
+ Two common Hadoop file system implementations are
+ <ul>
+ <li>the local file system: file:///path
+ <li>the HDFS file system: hdfs://nnAddress:nnPort/path
+ </ul>
+ 
+ The Hadoop file system also supports additional naming schemes besides URIs.
+ Hadoop has the concept of a <i>default file system</i>, which implies a
+ default URI scheme and authority. This enables <i>slash-relative names</i>
+ relative to the default FS, which are more convenient for users and
+ application writers. The default FS is typically set by the user's
+ environment, though it can also be manually specified.
+ <p>
+ 
+ Hadoop also supports <i>working-directory-relative</i> names, which are paths
+ relative to the current working directory (similar to Unix). The working
+ directory can be in a different file system than the default FS.
+ <p>
+ Thus, Hadoop path names can be specified as one of the following:
+ <ul>
+ <li>a fully-qualified URI: scheme://authority/path (e.g.
+ hdfs://nnAddress:nnPort/foo/bar)
+ <li>a slash-relative name: path relative to the default file system (e.g.
+ /foo/bar)
+ <li>a working-directory-relative name: path relative to the working dir (e.g.
+ foo/bar)
+ </ul>
+  Relative paths with scheme (scheme:foo/bar) are illegal.
+  
+ <h2>Role of FileContext and Configuration Defaults</h2>
+
+ The FileContext is the analogue of per-process file-related state in Unix. It
+ contains two properties:
+ 
+ <ul>
+ <li>the default file system (for resolving slash-relative names)
+ <li>the umask (for file permissions)
+ </ul>
+ In general, these properties are obtained from the default configuration file
+ in the user's environment (see {@link Configuration}).
+ 
+ Further file system properties are specified on the server-side. File system
+ operations default to using these server-side defaults unless otherwise
+ specified.
+ <p>
+ The file system related server-side defaults are:
+  <ul>
+  <li> the home directory (default is "/user/userName")
+  <li> the initial wd (only for local fs)
+  <li> replication factor
+  <li> block size
+  <li> buffer size
+  <li> encryptDataTransfer 
+  <li> checksum option. (checksumType and  bytesPerChecksum)
+  </ul>
+
+ <h2>Example Usage</h2>
+
+ Example 1: use the default config read from the $HADOOP_CONFIG/core.xml.
+   Unspecified values come from core-defaults.xml in the release jar.
+  <ul>  
+  <li> myFContext = FileContext.getFileContext(); // uses the default config
+                                                // which has your default FS 
+  <li>  myFContext.create(path, ...);
+  <li>  myFContext.setWorkingDir(path);
+  <li>  myFContext.open (path, ...);  
+  <li>...
+  </ul>  
+ Example 2: Get a FileContext with a specific URI as the default FS
+  <ul>  
+  <li> myFContext = FileContext.getFileContext(URI);
+  <li> myFContext.create(path, ...);
+  <li>...
+ </ul>
+ Example 3: FileContext with local file system as the default
+  <ul> 
+  <li> myFContext = FileContext.getLocalFSFileContext();
+  <li> myFContext.create(path, ...);
+  <li> ...
+  </ul> 
+ Example 4: Use a specific config, ignoring $HADOOP_CONFIG
+  Generally you should not need to use a config unless you are doing
+   <ul> 
+   <li> configX = someConfigSomeOnePassedToYou;
+   <li> myFContext = getFileContext(configX); // configX is not changed,
+                                              // is passed down 
+   <li> myFContext.create(path, ...);
+   <li>...
+  </ul>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FileContext -->
+  <!-- start class org.apache.hadoop.fs.FileStatus -->
+  <class name="FileStatus" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <implements name="java.lang.Comparable"/>
+    <implements name="java.io.Serializable"/>
+    <implements name="java.io.ObjectInputValidation"/>
+    <constructor name="FileStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor for file systems on which symbolic links are not supported]]>
+      </doc>
+    </constructor>
+    <constructor name="FileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path, boolean, boolean, boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path, java.util.Set"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileStatus" type="org.apache.hadoop.fs.FileStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copy constructor.
+
+ @param other FileStatus to copy]]>
+      </doc>
+    </constructor>
+    <method name="attributes" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="acl" type="boolean"/>
+      <param name="crypt" type="boolean"/>
+      <param name="ec" type="boolean"/>
+      <param name="sn" type="boolean"/>
+      <doc>
+      <![CDATA[Convert boolean attributes to a set of flags.
+ @param acl   See {@link AttrFlags#HAS_ACL}.
+ @param crypt See {@link AttrFlags#HAS_CRYPT}.
+ @param ec    See {@link AttrFlags#HAS_EC}.
+ @param sn    See {@link AttrFlags#SNAPSHOT_ENABLED}.
+ @return converted set of flags.]]>
+      </doc>
+    </method>
+    <method name="getLen" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the length of this file, in bytes.
+ @return the length of this file, in bytes.]]>
+      </doc>
+    </method>
+    <method name="isFile" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is this a file?
+ @return true if this is a file]]>
+      </doc>
+    </method>
+    <method name="isDirectory" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is this a directory?
+ @return true if this is a directory]]>
+      </doc>
+    </method>
+    <method name="isDir" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="Use {@link FileStatus#isFile()},
+ {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
+ instead.">
+      <doc>
+      <![CDATA[Old interface, instead use the explicit {@link FileStatus#isFile()},
+ {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
+ @return true if this is a directory.
+ @deprecated Use {@link FileStatus#isFile()},
+ {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
+ instead.]]>
+      </doc>
+    </method>
+    <method name="isSymlink" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is this a symbolic link?
+ @return true if this is a symbolic link]]>
+      </doc>
+    </method>
+    <method name="getBlockSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the block size of the file.
+ @return the number of bytes]]>
+      </doc>
+    </method>
+    <method name="getReplication" return="short"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the replication factor of a file.
+ @return the replication factor of a file.]]>
+      </doc>
+    </method>
+    <method name="getModificationTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the modification time of the file.
+ @return the modification time of file in milliseconds since January 1, 1970 UTC.]]>
+      </doc>
+    </method>
+    <method name="getAccessTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the access time of the file.
+ @return the access time of file in milliseconds since January 1, 1970 UTC.]]>
+      </doc>
+    </method>
+    <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get FsPermission associated with the file.
+ @return permission. If a filesystem does not have a notion of permissions
+         or if permissions could not be determined, then default 
+         permissions equivalent of "rwxrwxrwx" is returned.]]>
+      </doc>
+    </method>
+    <method name="hasAcl" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Tell whether the underlying file or directory has ACLs set.
+
+ @return true if the underlying file or directory has ACLs set.]]>
+      </doc>
+    </method>
+    <method name="isEncrypted" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Tell whether the underlying file or directory is encrypted or not.
+
+ @return true if the underlying file is encrypted.]]>
+      </doc>
+    </method>
+    <method name="isErasureCoded" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Tell whether the underlying file or directory is erasure coded or not.
+
+ @return true if the underlying file or directory is erasure coded.]]>
+      </doc>
+    </method>
+    <method name="isSnapshotEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Check if directory is Snapshot enabled or not.
+
+ @return true if directory is snapshot enabled]]>
+      </doc>
+    </method>
+    <method name="getOwner" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the owner of the file.
+ @return owner of the file. The string could be empty if there is no
+         notion of owner of a file in a filesystem or if it could not 
+         be determined (rare).]]>
+      </doc>
+    </method>
+    <method name="getGroup" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the group associated with the file.
+ @return group for the file. The string could be empty if there is no
+         notion of group of a file in a filesystem or if it could not 
+         be determined (rare).]]>
+      </doc>
+    </method>
+    <method name="getPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setPath"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="setPermission"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <doc>
+      <![CDATA[Sets permission.
+ @param permission if permission is null, default value is set]]>
+      </doc>
+    </method>
+    <method name="setOwner"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="owner" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets owner.
+ @param owner if it is null, default value is set]]>
+      </doc>
+    </method>
+    <method name="setGroup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets group.
+ @param group if it is null, default value is set]]>
+      </doc>
+    </method>
+    <method name="getSymlink" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@return The contents of the symbolic link.]]>
+      </doc>
+    </method>
+    <method name="setSymlink"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.fs.FileStatus"/>
+      <doc>
+      <![CDATA[Compare this FileStatus to another FileStatus
+ @param   o the FileStatus to be compared.
+ @return  a negative integer, zero, or a positive integer as this object
+   is less than, equal to, or greater than the specified object.]]>
+      </doc>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Compare this FileStatus to another FileStatus.
+ This method was added back by HADOOP-14683 to keep binary compatibility.
+
+ @param   o the FileStatus to be compared.
+ @return  a negative integer, zero, or a positive integer as this object
+   is less than, equal to, or greater than the specified object.
+ @throws ClassCastException if the specified object is not FileStatus]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Compare if this object is equal to another object
+ @param   o the object to be compared.
+ @return  true if two file status has the same path name; false if not.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns a hash code value for the object, which is defined as
+ the hash code of the path name.
+
+ @return  a hash code value for the path name.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use the {@link PBHelper} and protobuf serialization directly.">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read instance encoded as protobuf from stream.
+ @param in Input stream
+ @see PBHelper#convert(FileStatus)
+ @deprecated Use the {@link PBHelper} and protobuf serialization directly.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use the {@link PBHelper} and protobuf serialization directly.">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write instance encoded as protobuf to stream.
+ @param out Output stream
+ @see PBHelper#convert(FileStatus)
+ @deprecated Use the {@link PBHelper} and protobuf serialization directly.]]>
+      </doc>
+    </method>
+    <method name="validateObject"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="InvalidObjectException" type="java.io.InvalidObjectException"/>
+    </method>
+    <field name="NONE" type="java.util.Set"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shared, empty set of attributes (a common case for FileStatus).]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Interface that represents the client side information for a file.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FileStatus -->
+  <!-- start class org.apache.hadoop.fs.FileSystem -->
+  <class name="FileSystem" extends="org.apache.hadoop.conf.Configured"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Closeable"/>
+    <implements name="org.apache.hadoop.security.token.DelegationTokenIssuer"/>
+    <implements name="org.apache.hadoop.fs.PathCapabilities"/>
+    <constructor name="FileSystem"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="get" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="user" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get a FileSystem instance based on the uri, the passed in
+ configuration and the user.
+ @param uri of the filesystem
+ @param conf the configuration to use
+ @param user to perform the get as
+ @return the filesystem instance
+ @throws IOException failure to load
+ @throws InterruptedException If the {@code UGI.doAs()} call was
+ somehow interrupted.]]>
+      </doc>
+    </method>
+    <method name="get" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the configured FileSystem implementation.
+ @param conf the configuration to use]]>
+      </doc>
+    </method>
+    <method name="getDefaultUri" return="java.net.URI"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the default FileSystem URI from a configuration.
+ @param conf the configuration to use
+ @return the uri of the default filesystem]]>
+      </doc>
+    </method>
+    <method name="setDefaultUri"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="uri" type="java.net.URI"/>
+      <doc>
+      <![CDATA[Set the default FileSystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+      </doc>
+    </method>
+    <method name="setDefaultUri"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="uri" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the default FileSystem URI in a configuration.
+ @param conf the configuration to alter
+ @param uri the new default filesystem uri]]>
+      </doc>
+    </method>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Initialize a FileSystem.
+
+ Called after the new FileSystem instance is constructed, and before it
+ is ready for use.
+
+ FileSystem implementations overriding this method MUST forward it to
+ their superclass, though the order in which it is done, and whether
+ to alter the configuration before the invocation are options of the
+ subclass.
+ @param name a URI whose authority section names the host, port, etc.
+   for this FileSystem
+ @param conf the configuration
+ @throws IOException on any failure to initialize this instance.
+ @throws IllegalArgumentException if the URI is considered invalid.]]>
+      </doc>
+    </method>
+    <method name="getScheme" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the protocol scheme for this FileSystem.
+ <p>
+ This implementation throws an <code>UnsupportedOperationException</code>.
+
+ @return the protocol scheme for this FileSystem.
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default).]]>
+      </doc>
+    </method>
+    <method name="getUri" return="java.net.URI"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns a URI which identifies this FileSystem.
+
+ @return the URI of this filesystem.]]>
+      </doc>
+    </method>
+    <method name="getCanonicalUri" return="java.net.URI"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return a canonicalized form of this FileSystem's URI.
+
+ The default implementation simply calls {@link #canonicalizeUri(URI)}
+ on the filesystem's own URI, so subclasses typically only need to
+ implement that method.
+
+ @see #canonicalizeUri(URI)]]>
+      </doc>
+    </method>
+    <method name="canonicalizeUri" return="java.net.URI"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <doc>
+      <![CDATA[Canonicalize the given URI.
+
+ This is implementation-dependent, and may for example consist of
+ canonicalizing the hostname using DNS and adding the default
+ port if not specified.
+
+ The default implementation simply fills in the default port if
+ not specified and if {@link #getDefaultPort()} returns a
+ default port.
+
+ @return URI
+ @see NetUtils#getCanonicalUri(URI, int)]]>
+      </doc>
+    </method>
+    <method name="getDefaultPort" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the default port for this FileSystem.
+ @return the default port or 0 if there isn't one]]>
+      </doc>
+    </method>
+    <method name="getFSofPath" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="absOrFqPath" type="org.apache.hadoop.fs.Path"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getCanonicalServiceName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a canonical service name for this FileSystem.
+ The token cache is the only user of the canonical service name,
+ and uses it to lookup this FileSystem's service tokens.
+ If the file system provides a token of its own then it must have a
+ canonical name, otherwise the canonical name can be null.
+
+ Default implementation: If the FileSystem has child file systems
+ (such as an embedded file system) then it is assumed that the FS has no
+ tokens of its own and hence returns a null name; otherwise a service
+ name is built using Uri and port.
+
+ @return a service string that uniquely identifies this file system, null
+         if the filesystem does not implement tokens
+ @see SecurityUtil#buildDTServiceName(URI, int)]]>
+      </doc>
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="call {@link #getUri()} instead.">
+      <doc>
+      <![CDATA[@deprecated call {@link #getUri()} instead.]]>
+      </doc>
+    </method>
+    <method name="getNamed" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="call {@link #get(URI, Configuration)} instead.">
+      <param name="name" type="java.lang.String"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated call {@link #get(URI, Configuration)} instead.]]>
+      </doc>
+    </method>
+    <method name="getLocal" return="org.apache.hadoop.fs.LocalFileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the local FileSystem.
+ @param conf the configuration to configure the FileSystem with
+ if it is newly instantiated.
+ @return a LocalFileSystem
+ @throws IOException if somehow the local FS cannot be instantiated.]]>
+      </doc>
+    </method>
+    <method name="get" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a FileSystem for this URI's scheme and authority.
+ <ol>
+ <li>
+   If the configuration has the property
+   {@code "fs.$SCHEME.impl.disable.cache"} set to true,
+   a new instance will be created, initialized with the supplied URI and
+   configuration, then returned without being cached.
+ </li>
+ <li>
+   If the there is a cached FS instance matching the same URI, it will
+   be returned.
+ </li>
+ <li>
+   Otherwise: a new FS instance will be created, initialized with the
+   configuration and URI, cached and returned to the caller.
+ </li>
+ </ol>
+ @throws IOException if the FileSystem cannot be instantiated.]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="user" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Returns the FileSystem for this URI's scheme and authority and the
+ given user. Internally invokes {@link #newInstance(URI, Configuration)}
+ @param uri of the filesystem
+ @param conf the configuration to use
+ @param user to perform the get as
+ @return filesystem instance
+ @throws IOException if the FileSystem cannot be instantiated.
+ @throws InterruptedException If the {@code UGI.doAs()} call was
+         somehow interrupted.]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the FileSystem for this URI's scheme and authority.
+ The entire URI is passed to the FileSystem instance's initialize method.
+ This always returns a new FileSystem object.
+ @param uri FS URI
+ @param config configuration to use
+ @return the new FS instance
+ @throws IOException FS creation or initialization failure.]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns a unique configured FileSystem implementation for the default
+ filesystem of the supplied configuration.
+ This always returns a new FileSystem object.
+ @param conf the configuration to use
+ @return the new FS instance
+ @throws IOException FS creation or initialization failure.]]>
+      </doc>
+    </method>
+    <method name="newInstanceLocal" return="org.apache.hadoop.fs.LocalFileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a unique local FileSystem object.
+ @param conf the configuration to configure the FileSystem with
+ @return a new LocalFileSystem object.
+ @throws IOException FS creation or initialization failure.]]>
+      </doc>
+    </method>
+    <method name="closeAll"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close all cached FileSystem instances. After this operation, they
+ may not be used in any operations.
+
+ @throws IOException a problem arose closing one or more filesystem.]]>
+      </doc>
+    </method>
+    <method name="closeAllForUGI"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close all cached FileSystem instances for a given UGI.
+ Be sure those filesystems are not used anymore.
+ @param ugi user group info to close
+ @throws IOException a problem arose closing one or more filesystem.]]>
+      </doc>
+    </method>
+    <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Qualify a path to one which uses this FileSystem and, if relative,
+ made absolute.
+ @param path to qualify.
+ @return this path if it contains a scheme and authority and is absolute, or
+ a new path that includes a path and authority and is fully qualified
+ @see Path#makeQualified(URI, Path)
+ @throws IllegalArgumentException if the path has a schema/URI different
+ from this FileSystem.]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a file with the provided permission.
+
+ The permission of the file is set to be the provided permission as in
+ setPermission, not permission{@literal &~}umask
+
+ The HDFS implementation is implemented using two RPCs.
+ It is understood that it is inefficient,
+ but the implementation is thread-safe. The other option is to change the
+ value of umask in configuration to be 0, but it is not thread-safe.
+
+ @param fs FileSystem
+ @param file the name of the file to be created
+ @param permission the permission of the file
+ @return an output stream
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="mkdirs" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="dir" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a directory with the provided permission.
+ The permission of the directory is set to be the provided permission as in
+ setPermission, not permission{@literal &~}umask
+
+ @see #create(FileSystem, Path, FsPermission)
+
+ @param fs FileSystem handle
+ @param dir the name of the directory to be created
+ @param permission the permission of the directory
+ @return true if the directory creation succeeds; false otherwise
+ @throws IOException A problem creating the directories.]]>
+      </doc>
+    </method>
+    <method name="checkPath"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Check that a Path belongs to this FileSystem.
+
+ The base implementation performs case insensitive equality checks
+ of the URIs' schemes and authorities. Subclasses may implement slightly
+ different checks.
+ @param path to check
+ @throws IllegalArgumentException if the path is not considered to be
+ part of this FileSystem.]]>
+      </doc>
+    </method>
+    <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="start" type="long"/>
+      <param name="len" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return an array containing hostnames, offset and size of
+ portions of the given file.  For nonexistent
+ file or regions, {@code null} is returned.
+
+ <pre>
+   if f == null :
+     result = null
+   elif f.getLen() {@literal <=} start:
+     result = []
+   else result = [ locations(FS, b) for b in blocks(FS, p, s, s+l)]
+ </pre>
+ This call is most helpful with a distributed filesystem
+ where the hostnames of machines that contain blocks of the given file
+ can be determined.
+
+ The default implementation returns an array containing one element:
+ <pre>
+ BlockLocation( { "localhost:9866" },  { "localhost" }, 0, file.getLen())
+ </pre>
+
+ In HDFS, if file is three-replicated, the returned array contains
+ elements like:
+ <pre>
+ BlockLocation(offset: 0, length: BLOCK_SIZE,
+   hosts: {"host1:9866", "host2:9866", "host3:9866"})
+ BlockLocation(offset: BLOCK_SIZE, length: BLOCK_SIZE,
+   hosts: {"host2:9866", "host3:9866", "host4:9866"})
+ </pre>
+
+ And if a file is erasure-coded, the returned BlockLocation are logical
+ block groups.
+
+ Suppose we have a RS_3_2 coded file (3 data units and 2 parity units).
+ 1. If the file size is less than one stripe size, say 2 * CELL_SIZE, then
+ there will be one BlockLocation returned, with 0 offset, actual file size
+ and 4 hosts (2 data blocks and 2 parity blocks) hosting the actual blocks.
+ 2. If the file size is less than one group size but greater than one
+ stripe size, then there will be one BlockLocation returned, with 0 offset,
+ actual file size with 5 hosts (3 data blocks and 2 parity blocks) hosting
+ the actual blocks.
+ 3. If the file size is greater than one group size, 3 * BLOCK_SIZE + 123
+ for example, then the result will be like:
+ <pre>
+ BlockLocation(offset: 0, length: 3 * BLOCK_SIZE, hosts: {"host1:9866",
+   "host2:9866","host3:9866","host4:9866","host5:9866"})
+ BlockLocation(offset: 3 * BLOCK_SIZE, length: 123, hosts: {"host1:9866",
+   "host4:9866", "host5:9866"})
+ </pre>
+
+ @param file FilesStatus to get data from
+ @param start offset into the given file
+ @param len length for which to get locations for
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="start" type="long"/>
+      <param name="len" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return an array containing hostnames, offset and size of
+ portions of the given file.  For a nonexistent
+ file or regions, {@code null} is returned.
+
+ This call is most helpful with location-aware distributed
+ filesystems, where it returns hostnames of machines that
+ contain the given file.
+
+ A FileSystem will normally return the equivalent result
+ of passing the {@code FileStatus} of the path to
+ {@link #getFileBlockLocations(FileStatus, long, long)}
+
+ @param p path is used to identify an FS since an FS could have
+          another FS that it could be delegating the call to
+ @param start offset into the given file
+ @param len length for which to get locations for
+ @throws FileNotFoundException when the path does not exist
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #getServerDefaults(Path)} instead">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a set of server default configuration values.
+ @return server default configuration values
+ @throws IOException IO failure
+ @deprecated use {@link #getServerDefaults(Path)} instead]]>
+      </doc>
+    </method>
+    <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a set of server default configuration values.
+ @param p path is used to identify an FS since an FS could have
+          another FS that it could be delegating the call to
+ @return server default configuration values
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="resolvePath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the fully-qualified path of path, resolving the path
+ through any symlinks or mount point.
+ @param p path to be resolved
+ @return fully qualified path
+ @throws FileNotFoundException if the path is not present
+ @throws IOException for any other error]]>
+      </doc>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file to open
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fd" type="org.apache.hadoop.fs.PathHandle"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Open an FSDataInputStream matching the PathHandle instance. The
+ implementation may encode metadata in PathHandle to address the
+ resource directly and verify that the resource referenced
+ satisfies constraints specified at its construction.
+ @param fd PathHandle object returned by the FS authority.
+ @throws InvalidPathHandleException If {@link PathHandle} constraints are
+                                    not satisfied
+ @throws IOException IO failure
+ @throws UnsupportedOperationException If {@link #open(PathHandle, int)}
+                                       not overridden by subclass]]>
+      </doc>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fd" type="org.apache.hadoop.fs.PathHandle"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Open an FSDataInputStream matching the PathHandle instance. The
+ implementation may encode metadata in PathHandle to address the
+ resource directly and verify that the resource referenced
+ satisfies constraints specified at its construction.
+ @param fd PathHandle object returned by the FS authority.
+ @param bufferSize the size of the buffer to use
+ @throws InvalidPathHandleException If {@link PathHandle} constraints are
+                                    not satisfied
+ @throws IOException IO failure
+ @throws UnsupportedOperationException If not overridden by subclass]]>
+      </doc>
+    </method>
+    <method name="getPathHandle" return="org.apache.hadoop.fs.PathHandle"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="opt" type="org.apache.hadoop.fs.Options.HandleOpt[]"/>
+      <doc>
+      <![CDATA[Create a durable, serializable handle to the referent of the given
+ entity.
+ @param stat Referent in the target FileSystem
+ @param opt If absent, assume {@link HandleOpt#path()}.
+ @throws IllegalArgumentException If the FileStatus does not belong to
+         this FileSystem
+ @throws UnsupportedOperationException If {@link #createPathHandle}
+         not overridden by subclass.
+ @throws UnsupportedOperationException If this FileSystem cannot enforce
+         the specified constraints.]]>
+      </doc>
+    </method>
+    <method name="createPathHandle" return="org.apache.hadoop.fs.PathHandle"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="opt" type="org.apache.hadoop.fs.Options.HandleOpt[]"/>
+      <doc>
+      <![CDATA[Hook to implement support for {@link PathHandle} operations.
+ @param stat Referent in the target FileSystem
+ @param opt Constraints that determine the validity of the
+            {@link PathHandle} reference.]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.
+ @param f the file to create
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="overwrite" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an FSDataOutputStream at the indicated Path.
+ @param f the file to create
+ @param overwrite if a file with this name already exists, then if true,
+   the file will be overwritten, and if false an exception will be thrown.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.
+ @param f the file to create
+ @param progress to report progress
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="replication" type="short"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an FSDataOutputStream at the indicated Path.
+ Files are overwritten by default.
+ @param f the file to create
+ @param replication the replication factor
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="replication" type="short"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ Files are overwritten by default.
+ @param f the file to create
+ @param replication the replication factor
+ @param progress to report progress
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an FSDataOutputStream at the indicated Path.
+ @param f the file to create
+ @param overwrite if a path with this name already exists, then if true,
+   the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an {@link FSDataOutputStream} at the indicated Path
+ with write-progress reporting.
+
+ The frequency of callbacks is implementation-specific; it may be "none".
+ @param f the path of the file to open
+ @param overwrite if a file with this name already exists, then if true,
+   the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an FSDataOutputStream at the indicated Path.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+   the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+   the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param permission file permission
+ @param overwrite if a file with this name already exists, then if true,
+   the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize block size
+ @param progress the progress reporter
+ @throws IOException IO failure
+ @see #setPermission(Path, FsPermission)]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="flags" type="java.util.EnumSet"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress
+ reporting.
+ @param f the file name to open
+ @param permission file permission
+ @param flags {@link CreateFlag}s to use for this stream.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize block size
+ @param progress the progress reporter
+ @throws IOException IO failure
+ @see #setPermission(Path, FsPermission)]]>
+      </doc>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="flags" type="java.util.EnumSet"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <param name="checksumOpt" type="org.apache.hadoop.fs.Options.ChecksumOpt"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an FSDataOutputStream at the indicated Path with a custom
+ checksum option.
+ @param f the file name to open
+ @param permission file permission
+ @param flags {@link CreateFlag}s to use for this stream.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize block size
+ @param progress the progress reporter
+ @param checksumOpt checksum parameter. If null, the values
+        found in conf will be used.
+ @throws IOException IO failure
+ @see #setPermission(Path, FsPermission)]]>
+      </doc>
+    </method>
+    <method name="primitiveCreate" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="flag" type="java.util.EnumSet"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <param name="checksumOpt" type="org.apache.hadoop.fs.Options.ChecksumOpt"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This create has been added to support the FileContext that processes
+ the permission with umask before calling this method.
+ This is a temporary method added to support the transition from FileSystem
+ to FileContext for user applications.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="primitiveMkdir" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This version of the mkdirs method assumes that the permission is absolute.
+ It has been added to support the FileContext that processes the permission
+ with umask before calling this method.
+ This is a temporary method added to support the transition from FileSystem
+ to FileContext for user applications.
+ @param f path
+ @param absolutePermission permissions
+ @return true if the directory was actually created.
+ @throws IOException IO failure
+ @see #mkdirs(Path, FsPermission)]]>
+      </doc>
+    </method>
+    <method name="primitiveMkdir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="createParent" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This version of the mkdirs method assumes that the permission is absolute.
+ It has been added to support the FileContext that processes the permission
+ with umask before calling this method.
+ This is a temporary method added to support the transition from FileSystem
+ to FileContext for user applications.]]>
+      </doc>
+    </method>
+    <method name="createNonRecursive" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting. Same as create(), except fails if parent directory doesn't
+ already exist.
+ @param f the file name to open
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize block size
+ @param progress the progress reporter
+ @throws IOException IO failure
+ @see #setPermission(Path, FsPermission)]]>
+      </doc>
+    </method>
+    <method name="createNonRecursive" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting. Same as create(), except fails if parent directory doesn't
+ already exist.
+ @param f the file name to open
+ @param permission file permission
+ @param overwrite if a file with this name already exists, then if true,
+ the file will be overwritten, and if false an error will be thrown.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize block size
+ @param progress the progress reporter
+ @throws IOException IO failure
+ @see #setPermission(Path, FsPermission)]]>
+      </doc>
+    </method>
+    <method name="createNonRecursive" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="flags" type="java.util.EnumSet"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress
+ reporting. Same as create(), except fails if parent directory doesn't
+ already exist.
+ @param f the file name to open
+ @param permission file permission
+ @param flags {@link CreateFlag}s to use for this stream.
+ @param bufferSize the size of the buffer to be used.
+ @param replication required block replication for the file.
+ @param blockSize block size
+ @param progress the progress reporter
+ @throws IOException IO failure
+ @see #setPermission(Path, FsPermission)]]>
+      </doc>
+    </method>
+    <method name="createNewFile" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Creates the given Path as a brand-new zero-length file.  If
+ create fails, or if it already existed, return false.
+ <i>Important: the default implementation is not atomic</i>
+ @param f path to use for create
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Append to an existing file (optional operation).
+ Same as
+ {@code append(f, getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+     IO_FILE_BUFFER_SIZE_DEFAULT), null)}
+ @param f the existing file to be appended.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default).]]>
+      </doc>
+    </method>
+    <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Append to an existing file (optional operation).
+ Same as append(f, bufferSize, null).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default).]]>
+      </doc>
+    </method>
+    <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Append to an existing file (optional operation).
+ @param f the existing file to be appended.
+ @param bufferSize the size of the buffer to be used.
+ @param progress for reporting progress if it is not null.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default).]]>
+      </doc>
+    </method>
+    <method name="concat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="trg" type="org.apache.hadoop.fs.Path"/>
+      <param name="psrcs" type="org.apache.hadoop.fs.Path[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Concat existing files together.
+ @param trg the path to the target destination.
+ @param psrcs the paths to the sources to use for the concatenation.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default).]]>
+      </doc>
+    </method>
+    <method name="getReplication" return="short"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getFileStatus(Path)} instead">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the replication factor.
+
+ @deprecated Use {@link #getFileStatus(Path)} instead
+ @param src file name
+ @return file replication
+ @throws FileNotFoundException if the path does not resolve.
+ @throws IOException an IO failure]]>
+      </doc>
+    </method>
+    <method name="setReplication" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="replication" type="short"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the replication for an existing file.
+ If a filesystem does not support replication, it will always
+ return true: the check for a file existing may be bypassed.
+ This is the default behavior.
+ @param src file name
+ @param replication new replication
+ @throws IOException IO failure
+ @return true if successful, or the feature is unsupported;
+         false if replication is supported but the file does not exist,
+         or is a directory]]>
+      </doc>
+    </method>
+    <method name="rename" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Renames Path src to Path dst.
+ @param src path to be renamed
+ @param dst new path after rename
+ @throws IOException on failure
+ @return true if rename is successful]]>
+      </doc>
+    </method>
+    <method name="rename"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Renames Path src to Path dst
+ <ul>
+   <li>Fails if src is a file and dst is a directory.</li>
+   <li>Fails if src is a directory and dst is a file.</li>
+   <li>Fails if the parent of dst does not exist or is a file.</li>
+ </ul>
+ <p>
+ If OVERWRITE option is not passed as an argument, rename fails
+ if the dst already exists.
+ <p>
+ If OVERWRITE option is passed as an argument, rename overwrites
+ the dst if it is a file or an empty directory. Rename fails if dst is
+ a non-empty directory.
+ <p>
+ Note that atomicity of rename is dependent on the file system
+ implementation. Please refer to the file system documentation for
+ details. This default implementation is non atomic.
+ <p>
+ This method is deprecated since it is a temporary method added to
+ support the transition from FileSystem to FileContext for user
+ applications.
+
+ @param src path to be renamed
+ @param dst new path after rename
+ @throws FileNotFoundException src path does not exist, or the parent
+ path of dst does not exist.
+ @throws FileAlreadyExistsException dest path exists and is a file
+ @throws ParentNotDirectoryException if the parent path of dest is not
+ a directory
+ @throws IOException on failure]]>
+      </doc>
+    </method>
+    <method name="truncate" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="newLength" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Truncate the file in the indicated path to the indicated size.
+ <ul>
+   <li>Fails if path is a directory.</li>
+   <li>Fails if path does not exist.</li>
+   <li>Fails if path is not closed.</li>
+   <li>Fails if new size is greater than current size.</li>
+ </ul>
+ @param f The path to the file to be truncated
+ @param newLength The size the file is to be truncated to
+
+ @return <code>true</code> if the file has been truncated to the desired
+ <code>newLength</code> and is immediately available to be reused for
+ write operations such as <code>append</code>, or
+ <code>false</code> if a background process of adjusting the length of
+ the last block has been started, and clients should wait for it to
+ complete before proceeding with further file updates.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default).]]>
+      </doc>
+    </method>
+    <method name="delete" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #delete(Path, boolean)} instead.">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete a file/directory.
+ @deprecated Use {@link #delete(Path, boolean)} instead.]]>
+      </doc>
+    </method>
+    <method name="delete" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="recursive" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete a file.
+
+ @param f the path to delete.
+ @param recursive if path is a directory and set to
+ true, the directory is deleted else throws an exception. In
+ case of a file the recursive can be set to either true or false.
+ @return  true if delete is successful else false.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="deleteOnExit" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Mark a path to be deleted when its FileSystem is closed.
+ When the JVM shuts down cleanly, all cached FileSystem objects will be
+ closed automatically. Then the marked paths will be deleted as a result.
+
+ If a FileSystem instance is not cached, i.e. has been created with
+ {@link #createFileSystem(URI, Configuration)}, then the paths will
+ be deleted when {@link #close()} is called on that instance.
+
+ The path must exist in the filesystem at the time of the method call;
+ it does not have to exist at the time of JVM shutdown.
+
+ Notes
+ <ol>
+   <li>Clean shutdown of the JVM cannot be guaranteed.</li>
+   <li>The time to shut down a FileSystem will depend on the number of
+   files to delete. For filesystems where the cost of checking
+   for the existence of a file/directory and the actual delete operation
+   (for example: object stores) is high, the time to shutdown the JVM can be
+   significantly extended by over-use of this feature.</li>
+   <li>Connectivity problems with a remote filesystem may delay shutdown
+   further, and may cause the files to not be deleted.</li>
+ </ol>
+ @param f the path to delete.
+ @return  true if deleteOnExit is successful, otherwise false.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="cancelDeleteOnExit" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Cancel the scheduled deletion of the path when the FileSystem is closed.
+ @param f the path to cancel deletion
+ @return true if the path was found in the delete-on-exit list.]]>
+      </doc>
+    </method>
+    <method name="processDeleteOnExit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Delete all paths that were marked as delete-on-exit. This recursively
+ deletes all files and directories in the specified paths.
+
+ The time to process this operation is {@code O(paths)}, with the actual
+ time dependent on the time for existence and deletion operations to
+ complete, successfully or not.]]>
+      </doc>
+    </method>
+    <method name="exists" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Check if a path exists.
+
+ It is highly discouraged to call this method back to back with other
+ {@link #getFileStatus(Path)} calls, as this will involve multiple redundant
+ RPC calls in HDFS.
+
+ @param f source path
+ @return true if the path exists
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="isDirectory" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getFileStatus(Path)} instead">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[True iff the named path is a directory.
+ Note: Avoid using this method. Instead reuse the FileStatus
+ returned by getFileStatus() or listStatus() methods.
+
+ @param f path to check
+ @throws IOException IO failure
+ @deprecated Use {@link #getFileStatus(Path)} instead]]>
+      </doc>
+    </method>
+    <method name="isFile" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getFileStatus(Path)} instead">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[True iff the named path is a regular file.
+ Note: Avoid using this method. Instead reuse the FileStatus
+ returned by {@link #getFileStatus(Path)} or listStatus() methods.
+
+ @param f path to check
+ @throws IOException IO failure
+ @deprecated Use {@link #getFileStatus(Path)} instead]]>
+      </doc>
+    </method>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getFileStatus(Path)} instead.">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The number of bytes in a file.
+ @return the number of bytes; 0 for a directory
+ @deprecated Use {@link #getFileStatus(Path)} instead.
+ @throws FileNotFoundException if the path does not resolve
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the {@link ContentSummary} of a given {@link Path}.
+ @param f path to use
+ @throws FileNotFoundException if the path does not resolve
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="getQuotaUsage" return="org.apache.hadoop.fs.QuotaUsage"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the {@link QuotaUsage} of a given {@link Path}.
+ @param f path to use
+ @return the quota usage
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="setQuota"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="namespaceQuota" type="long"/>
+      <param name="storagespaceQuota" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set quota for the given {@link Path}.
+
+ @param src the target path to set quota for
+ @param namespaceQuota the namespace quota (i.e., # of files/directories)
+                       to set
+ @param storagespaceQuota the storage space quota to set
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="setQuotaByStorageType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="type" type="org.apache.hadoop.fs.StorageType"/>
+      <param name="quota" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set per storage type quota for the given {@link Path}.
+
+ @param src the target path to set storage type quota for
+ @param type the storage type to set
+ @param quota the quota to set for the given storage type
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+ <p>
+ Does not guarantee to return the List of files/directories status in a
+ sorted order.
+ <p>
+ Will not return null. Expect IOException upon access error.
+ @param f given path
+ @return the statuses of the files/directories in the given path
+ @throws FileNotFoundException when the path does not exist
+ @throws IOException see specific implementation]]>
+      </doc>
+    </method>
+    <method name="listCorruptFileBlocks" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List corrupted file blocks.
+ @return an iterator over the corrupt files under the given path
+ (may contain duplicates if a file has more than one corrupt block)
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default).
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Filter files/directories in the given path using the user-supplied path
+ filter.
+ <p>
+ Does not guarantee to return the List of files/directories status in a
+ sorted order.
+
+ @param f
+          a path name
+ @param filter
+          the user-supplied path filter
+ @return an array of FileStatus objects for the files under the given path
+         after applying the filter
+ @throws FileNotFoundException when the path does not exist
+ @throws IOException see specific implementation]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Filter files/directories in the given list of paths using default
+ path filter.
+ <p>
+ Does not guarantee to return the List of files/directories status in a
+ sorted order.
+
+ @param files
+          a list of paths
+ @return a list of statuses for the files under the given paths after
+         applying the filter default Path filter
+ @throws FileNotFoundException when the path does not exist
+ @throws IOException see specific implementation]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="files" type="org.apache.hadoop.fs.Path[]"/>
+      <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Filter files/directories in the given list of paths using user-supplied
+ path filter.
+ <p>
+ Does not guarantee to return the List of files/directories status in a
+ sorted order.
+
+ @param files
+          a list of paths
+ @param filter
+          the user-supplied path filter
+ @return a list of statuses for the files under the given paths after
+         applying the filter
+ @throws FileNotFoundException when the path does not exist
+ @throws IOException see specific implementation]]>
+      </doc>
+    </method>
+    <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Return all the files that match filePattern and are not checksum
+ files. Results are sorted by their names.
+
+ <p>
+ A filename pattern is composed of <i>regular</i> characters and
+ <i>special pattern matching</i> characters, which are:
+
+ <dl>
+  <dd>
+   <dl>
+    <dt> <tt> ? </tt>
+    <dd> Matches any single character.
+
+    <p>
+    <dt> <tt> * </tt>
+    <dd> Matches zero or more characters.
+
+    <p>
+    <dt> <tt> [<i>abc</i>] </tt>
+    <dd> Matches a single character from character set
+     <tt>{<i>a,b,c</i>}</tt>.
+
+    <p>
+    <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
+    <dd> Matches a single character from the character range
+     <tt>{<i>a...b</i>}</tt>.  Note that character <tt><i>a</i></tt> must be
+     lexicographically less than or equal to character <tt><i>b</i></tt>.
+
+    <p>
+    <dt> <tt> [^<i>a</i>] </tt>
+    <dd> Matches a single character that is not from character set or range
+     <tt>{<i>a</i>}</tt>.  Note that the <tt>^</tt> character must occur
+     immediately to the right of the opening bracket.
+
+    <p>
+    <dt> <tt> \<i>c</i> </tt>
+    <dd> Removes (escapes) any special meaning of character <i>c</i>.
+
+    <p>
+    <dt> <tt> {ab,cd} </tt>
+    <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt>
+
+    <p>
+    <dt> <tt> {ab,c{de,fh}} </tt>
+    <dd> Matches a string from the string set <tt>{<i>ab, cde, cfh</i>}</tt>
+
+   </dl>
+  </dd>
+ </dl>
+
+ @param pathPattern a glob specifying a path pattern
+
+ @return an array of paths that match the path pattern
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathPattern" type="org.apache.hadoop.fs.Path"/>
+      <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return an array of {@link FileStatus} objects whose path names match
+ {@code pathPattern} and is accepted by the user-supplied path filter.
+ Results are sorted by their path names.
+
+ @param pathPattern a glob specifying the path pattern
+ @param filter a user-supplied path filter
+ @return null if {@code pathPattern} has no glob and the path does not exist
+         an empty array if {@code pathPattern} has a glob and no path
+         matches it else an array of {@link FileStatus} objects matching the
+         pattern
+ @throws IOException if any I/O error occurs when fetching file status]]>
+      </doc>
+    </method>
+    <method name="listLocatedStatus" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List the statuses of the files/directories in the given path if the path is
+ a directory.
+ Return the file's status and block locations if the path is a file.
+
+ If a returned status is a file, it contains the file's block locations.
+
+ @param f is the path
+
+ @return an iterator that traverses statuses of the files/directories
+         in the given path
+
+ @throws FileNotFoundException If <code>f</code> does not exist
+ @throws IOException If an I/O error occurred]]>
+      </doc>
+    </method>
+    <method name="listLocatedStatus" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List a directory.
+ The returned results include its block location if it is a file
+ The results are filtered by the given path filter
+ @param f a path
+ @param filter a path filter
+ @return an iterator that traverses statuses of the files/directories
+         in the given path
+ @throws FileNotFoundException if <code>f</code> does not exist
+ @throws IOException if any I/O error occurred]]>
+      </doc>
+    </method>
+    <method name="listStatusIterator" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns a remote iterator so that followup calls are made on demand
+ while consuming the entries. Each FileSystem implementation should
+ override this method and provide a more efficient implementation, if
+ possible.
+
+ Does not guarantee to return the iterator that traverses statuses
+ of the files in a sorted order.
+
+ @param p target path
+ @return remote iterator
+ @throws FileNotFoundException if <code>p</code> does not exist
+ @throws IOException if any I/O error occurred]]>
+      </doc>
+    </method>
+    <method name="listFiles" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="recursive" type="boolean"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List the statuses and block locations of the files in the given path.
+ Does not guarantee to return the iterator that traverses statuses
+ of the files in a sorted order.
+
+ <pre>
+ If the path is a directory,
+   if recursive is false, returns files in the directory;
+   if recursive is true, return files in the subtree rooted at the path.
+ If the path is a file, return the file's status and block locations.
+ </pre>
+ @param f is the path
+ @param recursive if the subdirectories need to be traversed recursively
+
+ @return an iterator that traverses statuses of the files
+
+ @throws FileNotFoundException when the path does not exist;
+ @throws IOException see specific implementation]]>
+      </doc>
+    </method>
+    <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the current user's home directory in this FileSystem.
+ The default implementation returns {@code "/user/$USER/"}.]]>
+      </doc>
+    </method>
+    <method name="setWorkingDirectory"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="new_dir" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Set the current working directory for the given FileSystem. All relative
+ paths will be resolved relative to it.
+
+ @param new_dir Path of new working directory]]>
+      </doc>
+    </method>
+    <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current working directory for the given FileSystem
+ @return the directory pathname]]>
+      </doc>
+    </method>
+    <method name="getInitialWorkingDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Note: with the new FileContext class, getWorkingDirectory()
+ will be removed.
+ The working directory is implemented in FileContext.
+
+ Some FileSystems like LocalFileSystem have an initial workingDir
+ that we use as the starting workingDir. For other file systems
+ like HDFS there is no built in notion of an initial workingDir.
+
+ @return if there is built in notion of workingDir then it
+ is returned; else a null is returned.]]>
+      </doc>
+    </method>
+    <method name="mkdirs" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Call {@link #mkdirs(Path, FsPermission)} with default permission.
+ @param f path
+ @return true if the directory was created
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="mkdirs" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Make the given file and all non-existent parents into
+ directories. Has roughly the semantics of Unix {@code mkdir -p}.
+ Existence of the directory hierarchy is not an error.
+ @param f path to create
+ @param permission to apply to f
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="copyFromLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src file is on the local disk.  Add it to filesystem at
+ the given dst name and the source is kept intact afterwards
+ @param src path
+ @param dst path
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="moveFromLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src files are on the local disk.  Add them to the filesystem at
+ the given dst name, removing the sources afterwards.
+ @param srcs source paths
+ @param dst path
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="moveFromLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src file is on the local disk.  Add it to the filesystem at
+ the given dst name, removing the source afterwards.
+ @param src local path
+ @param dst path
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="copyFromLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src file is on the local disk.  Add it to the filesystem at
+ the given dst name.
+ delSrc indicates if the source should be removed
+ @param delSrc whether to delete the src
+ @param src path
+ @param dst path]]>
+      </doc>
+    </method>
+    <method name="copyFromLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src files are on the local disk.  Add them to the filesystem at
+ the given dst name.
+ delSrc indicates if the source should be removed
+ @param delSrc whether to delete the src
+ @param overwrite whether to overwrite an existing file
+ @param srcs array of paths which are source
+ @param dst path
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="copyFromLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src file is on the local disk.  Add it to the filesystem at
+ the given dst name.
+ delSrc indicates if the source should be removed
+ @param delSrc whether to delete the src
+ @param overwrite whether to overwrite an existing file
+ @param src path
+ @param dst path
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="copyToLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copy a file from the remote filesystem to the local one.
+ @param src path src file in the remote filesystem
+ @param dst path local destination
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="moveToLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copy a file to the local filesystem, then delete it from the
+ remote filesystem (if successfully copied).
+ @param src path src file in the remote filesystem
+ @param dst path local destination
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="copyToLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copy a file from a remote filesystem to the local one.
+ delSrc indicates if the src will be removed or not.
+ @param delSrc whether to delete the src
+ @param src path src file in the remote filesystem
+ @param dst path local destination
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="copyToLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="useRawLocalFileSystem" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src file is under this filesystem, and the dst is on the local disk.
+ Copy it from the remote filesystem to the local dst name.
+ delSrc indicates if the src will be removed
+ or not. useRawLocalFileSystem indicates whether to use RawLocalFileSystem
+ as the local file system or not. RawLocalFileSystem is non-checksumming,
+ so it will not create any CRC files locally.
+
+ @param delSrc
+          whether to delete the src
+ @param src
+          path
+ @param dst
+          path
+ @param useRawLocalFileSystem
+          whether to use RawLocalFileSystem as local file system or not.
+
+ @throws IOException for any IO error]]>
+      </doc>
+    </method>
+    <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+      <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns a local file that the user can write output to.  The caller
+ provides both the eventual target name in this FileSystem
+ and the local working file path.
+ If this FileSystem is local, we write directly into the target.  If
+ the FileSystem is not local, we write into the tmp local area.
+ @param fsOutputFile path of output file
+ @param tmpLocalFile path of local tmp file
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="completeLocalOutput"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+      <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Called when we're all done writing to the target.
+ A local FS will do nothing, because we've written to exactly the
+ right place.
+ A remote FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.
+ @param fsOutputFile path of output file
+ @param tmpLocalFile path to local tmp file
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close this FileSystem instance.
+ Will release any held locks, delete all files queued for deletion
+ through calls to {@link #deleteOnExit(Path)}, and remove this FS instance
+ from the cache, if cached.
+
+ After this operation, the outcome of any method call on this FileSystem
+ instance, or any input/output stream created by it is <i>undefined</i>.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="getUsed" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the total size of all files in the filesystem.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="getUsed" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the total size of all files from a specified path.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="getBlockSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getFileStatus(Path)} instead">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the block size for a particular file.
+ @param f the filename
+ @return the number of bytes in a block
+ @deprecated Use {@link #getFileStatus(Path)} instead
+ @throws FileNotFoundException if the path is not present
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="getDefaultBlockSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #getDefaultBlockSize(Path)} instead">
+      <doc>
+      <![CDATA[Return the number of bytes that large input files should optimally
+ be split into to minimize I/O time.
+ @deprecated use {@link #getDefaultBlockSize(Path)} instead]]>
+      </doc>
+    </method>
+    <method name="getDefaultBlockSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Return the number of bytes that large input files should optimally
+ be split into to minimize I/O time.  The given path will be used to
+ locate the actual filesystem.  The full path does not have to exist.
+ @param f path of file
+ @return the default block size for the path's filesystem]]>
+      </doc>
+    </method>
+    <method name="getDefaultReplication" return="short"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #getDefaultReplication(Path)} instead">
+      <doc>
+      <![CDATA[Get the default replication.
+ @return the replication; the default value is "1".
+ @deprecated use {@link #getDefaultReplication(Path)} instead]]>
+      </doc>
+    </method>
+    <method name="getDefaultReplication" return="short"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Get the default replication for a path.
+ The given path will be used to locate the actual FileSystem to query.
+ The full path does not have to exist.
+ @param path of the file
+ @return default replication for the path's filesystem]]>
+      </doc>
+    </method>
+    <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a file status object that represents the path.
+ @param f The path we want information from
+ @return a FileStatus object
+ @throws FileNotFoundException when the path does not exist
+ @throws IOException see specific implementation]]>
+      </doc>
+    </method>
+    <method name="msync"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <doc>
+      <![CDATA[Synchronize client metadata state.
+ <p>
+ In some FileSystem implementations such as HDFS metadata
+ synchronization is essential to guarantee consistency of read requests
+ particularly in HA setting.
+ @throws IOException
+ @throws UnsupportedOperationException]]>
+      </doc>
+    </method>
+    <method name="fixRelativePart" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[See {@link FileContext#fixRelativePart}.]]>
+      </doc>
+    </method>
+    <method name="createSymlink"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="target" type="org.apache.hadoop.fs.Path"/>
+      <param name="link" type="org.apache.hadoop.fs.Path"/>
+      <param name="createParent" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[See {@link FileContext#createSymlink(Path, Path, boolean)}.]]>
+      </doc>
+    </method>
+    <method name="getFileLinkStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[See {@link FileContext#getFileLinkStatus(Path)}.
+ @throws FileNotFoundException when the path does not exist
+ @throws IOException see specific implementation]]>
+      </doc>
+    </method>
+    <method name="supportsSymlinks" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[See {@link AbstractFileSystem#supportsSymlinks()}.]]>
+      </doc>
+    </method>
+    <method name="getLinkTarget" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[See {@link FileContext#getLinkTarget(Path)}.
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="resolveLink" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[See {@link AbstractFileSystem#getLinkTarget(Path)}.
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the checksum of a file, if the FS supports checksums.
+
+ @param f The file path
+ @return The file checksum.  The default return value is null,
+  which indicates that no checksum algorithm is implemented
+  in the corresponding FileSystem.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="length" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the checksum of a file, from the beginning of the file till the
+ specific length.
+ @param f The file path
+ @param length The length of the file range for checksum calculation
+ @return The file checksum or null if checksums are not supported.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="setVerifyChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="verifyChecksum" type="boolean"/>
+      <doc>
+      <![CDATA[Set the verify checksum flag. This is only applicable if the
+ corresponding filesystem supports checksums.
+ By default doesn't do anything.
+ @param verifyChecksum Verify checksum flag]]>
+      </doc>
+    </method>
+    <method name="setWriteChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="writeChecksum" type="boolean"/>
+      <doc>
+      <![CDATA[Set the write checksum flag. This is only applicable if the
+ corresponding filesystem supports checksums.
+ By default doesn't do anything.
+ @param writeChecksum Write checksum flag]]>
+      </doc>
+    </method>
+    <method name="getStatus" return="org.apache.hadoop.fs.FsStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns a status object describing the use and capacity of the
+ filesystem. If the filesystem has multiple partitions, the
+ use and capacity of the root partition is reflected.
+
+ @return a FsStatus object
+ @throws IOException
+           see specific implementation]]>
+      </doc>
+    </method>
+    <method name="getStatus" return="org.apache.hadoop.fs.FsStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns a status object describing the use and capacity of the
+ filesystem. If the filesystem has multiple partitions, the
+ use and capacity of the partition pointed to by the specified
+ path is reflected.
+ @param p Path for which status should be obtained. null means
+ the default partition.
+ @return a FsStatus object
+ @throws IOException
+           see specific implementation]]>
+      </doc>
+    </method>
+    <method name="setPermission"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set permission of a path.
+ @param p The path
+ @param permission permission
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="setOwner"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="username" type="java.lang.String"/>
+      <param name="groupname" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set owner of a path (i.e. a file or a directory).
+ The parameters username and groupname cannot both be null.
+ @param p The path
+ @param username If it is null, the original username remains unchanged.
+ @param groupname If it is null, the original groupname remains unchanged.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="setTimes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="mtime" type="long"/>
+      <param name="atime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set access time of a file.
+ @param p The path
+ @param mtime Set the modification time of this file.
+              The number of milliseconds since Jan 1, 1970.
+              A value of -1 means that this call should not set modification time.
+ @param atime Set the access time of this file.
+              The number of milliseconds since Jan 1, 1970.
+              A value of -1 means that this call should not set access time.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="createSnapshot" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a snapshot with a default name.
+ @param path The directory where snapshots will be taken.
+ @return the snapshot path.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported]]>
+      </doc>
+    </method>
+    <method name="createSnapshot" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="snapshotName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a snapshot.
+ @param path The directory where snapshots will be taken.
+ @param snapshotName The name of the snapshot
+ @return the snapshot path.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported]]>
+      </doc>
+    </method>
+    <method name="renameSnapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="snapshotOldName" type="java.lang.String"/>
+      <param name="snapshotNewName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Rename a snapshot.
+ @param path The directory path where the snapshot was taken
+ @param snapshotOldName Old name of the snapshot
+ @param snapshotNewName New name of the snapshot
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="deleteSnapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="snapshotName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete a snapshot of a directory.
+ @param path  The directory that the to-be-deleted snapshot belongs to
+ @param snapshotName The name of the snapshot
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="modifyAclEntries"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Modifies ACL entries of files and directories.  This method can add new ACL
+ entries or modify the permissions on existing ACL entries.  All existing
+ ACL entries that are not specified in this call are retained without
+ changes.  (Modifications are merged into the current ACL.)
+
+ @param path Path to modify
+ @param aclSpec List&lt;AclEntry&gt; describing modifications
+ @throws IOException if an ACL could not be modified
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="removeAclEntries"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Removes ACL entries from files and directories.  Other ACL entries are
+ retained.
+
+ @param path Path to modify
+ @param aclSpec List describing entries to remove
+ @throws IOException if an ACL could not be modified
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="removeDefaultAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Removes all default ACL entries from files and directories.
+
+ @param path Path to modify
+ @throws IOException if an ACL could not be modified
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="removeAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Removes all but the base ACL entries of files and directories.  The entries
+ for user, group, and others are retained for compatibility with permission
+ bits.
+
+ @param path Path to modify
+ @throws IOException if an ACL could not be removed
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="setAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Fully replaces ACL of files and directories, discarding all existing
+ entries.
+
+ @param path Path to modify
+ @param aclSpec List describing modifications, which must include entries
+   for user, group, and others for compatibility with permission bits.
+ @throws IOException if an ACL could not be modified
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="getAclStatus" return="org.apache.hadoop.fs.permission.AclStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets the ACL of a file or directory.
+
+ @param path Path to get
+ @return AclStatus describing the ACL of the file or directory
+ @throws IOException if an ACL could not be read
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="setXAttr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="byte[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set an xattr of a file or directory.
+ The name must be prefixed with the namespace followed by ".". For example,
+ "user.attr".
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to modify
+ @param name xattr name.
+ @param value xattr value.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="setXAttr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="byte[]"/>
+      <param name="flag" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set an xattr of a file or directory.
+ The name must be prefixed with the namespace followed by ".". For example,
+ "user.attr".
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to modify
+ @param name xattr name.
+ @param value xattr value.
+ @param flag xattr set flag
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="getXAttr" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get an xattr name and value for a file or directory.
+ The name must be prefixed with the namespace followed by ".". For example,
+ "user.attr".
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to get extended attribute
+ @param name xattr name.
+ @return byte[] xattr value.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="getXAttrs" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get all of the xattr name/value pairs for a file or directory.
+ Only those xattrs which the logged-in user has permissions to view
+ are returned.
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to get extended attributes
+ @return Map describing the XAttrs of the file or directory
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="getXAttrs" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="names" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get all of the xattrs name/value pairs for a file or directory.
+ Only those xattrs which the logged-in user has permissions to view
+ are returned.
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to get extended attributes
+ @param names XAttr names.
+ @return Map describing the XAttrs of the file or directory
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="listXAttrs" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get all of the xattr names for a file or directory.
+ Only those xattr names which the logged-in user has permissions to view
+ are returned.
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to get extended attributes
+ @return List{@literal <String>} of the XAttr names of the file or directory
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="removeXAttr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Remove an xattr of a file or directory.
+ The name must be prefixed with the namespace followed by ".". For example,
+ "user.attr".
+ <p>
+ Refer to the HDFS extended attributes user documentation for details.
+
+ @param path Path to remove extended attribute
+ @param name xattr name
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="satisfyStoragePolicy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the source path to satisfy storage policy.
+ @param path The source path referring to either a directory or a file.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setStoragePolicy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="policyName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the storage policy for a given file or directory.
+
+ @param src file or directory path.
+ @param policyName the name of the target storage policy. The list
+                   of supported Storage policies can be retrieved
+                   via {@link #getAllStoragePolicies}.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="unsetStoragePolicy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unset the storage policy set for a given file or directory.
+ @param src file or directory path.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="getStoragePolicy" return="org.apache.hadoop.fs.BlockStoragePolicySpi"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Query the effective storage policy ID for the given file or directory.
+
+ @param src file or directory path.
+ @return storage policy for give file.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="getAllStoragePolicies" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Retrieve all the storage policies supported by this file system.
+
+ @return all storage policies supported by this filesystem.
+ @throws IOException IO failure
+ @throws UnsupportedOperationException if the operation is unsupported
+         (default outcome).]]>
+      </doc>
+    </method>
+    <method name="getTrashRoot" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Get the root directory of Trash for current user when the path specified
+ is deleted.
+
+ @param path the trash root of the path to be determined.
+ @return the default implementation returns {@code /user/$USER/.Trash}]]>
+      </doc>
+    </method>
+    <method name="getTrashRoots" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allUsers" type="boolean"/>
+      <doc>
+      <![CDATA[Get all the trash roots for current user or all users.
+
+ @param allUsers return trash roots for all users if true.
+ @return all the trash root directories.
+         Default FileSystem returns .Trash under users' home directories if
+         {@code /user/$USER/.Trash} exists.]]>
+      </doc>
+    </method>
+    <method name="hasPathCapability" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="capability" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The base FileSystem implementation generally has no knowledge
+ of the capabilities of actual implementations.
+ Unless it has a way to explicitly determine the capabilities,
+ this method returns false.
+ {@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getFileSystemClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scheme" type="java.lang.String"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the FileSystem implementation class of a filesystem.
+ This triggers a scan and load of all FileSystem implementations listed as
+ services and discovered via the {@link ServiceLoader}
+ @param scheme URL scheme of FS
+ @param conf configuration: can be null, in which case the check for
+ a filesystem binding declaration in the configuration is skipped.
+ @return the filesystem
+ @throws UnsupportedFileSystemException if there was no known implementation
+         for the scheme.
+ @throws IOException if the filesystem could not be loaded]]>
+      </doc>
+    </method>
+    <method name="getStatistics" return="java.util.Map"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="use {@link #getGlobalStorageStatistics()}">
+      <doc>
+      <![CDATA[Get the Map of Statistics object indexed by URI Scheme.
+ @return a Map having a key as URI scheme and value as Statistics object
+ @deprecated use {@link #getGlobalStorageStatistics()}]]>
+      </doc>
+    </method>
+    <method name="getAllStatistics" return="java.util.List"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="use {@link #getGlobalStorageStatistics()}">
+      <doc>
+      <![CDATA[Return the FileSystem classes that have Statistics.
+ @deprecated use {@link #getGlobalStorageStatistics()}]]>
+      </doc>
+    </method>
+    <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="use {@link #getGlobalStorageStatistics()}">
+      <param name="scheme" type="java.lang.String"/>
+      <param name="cls" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the statistics for a particular file system.
+ @param cls the class to lookup
+ @return a statistics object
+ @deprecated use {@link #getGlobalStorageStatistics()}]]>
+      </doc>
+    </method>
+    <method name="clearStatistics"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reset all statistics for all file systems.]]>
+      </doc>
+    </method>
+    <method name="printStatistics"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Print all statistics for all file systems to {@code System.out}]]>
+      </doc>
+    </method>
+    <method name="areSymlinksEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="enableSymlinks"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStorageStatistics" return="org.apache.hadoop.fs.StorageStatistics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the StorageStatistics for this FileSystem object.  These statistics are
+ per-instance.  They are not shared with any other FileSystem object.
+
+ <p>This is a default method which is intended to be overridden by
+ subclasses. The default implementation returns an empty storage statistics
+ object.</p>
+
+ @return    The StorageStatistics for this FileSystem instance.
+            Will never be null.]]>
+      </doc>
+    </method>
+    <method name="getGlobalStorageStatistics" return="org.apache.hadoop.fs.GlobalStorageStatistics"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the global storage statistics.]]>
+      </doc>
+    </method>
+    <method name="createDataOutputStreamBuilder" return="org.apache.hadoop.fs.FSDataOutputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fileSystem" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Create instance of the standard FSDataOutputStreamBuilder for the
+ given filesystem and path.
+ @param fileSystem owner
+ @param path path to create
+ @return a builder.]]>
+      </doc>
+    </method>
+    <method name="createFile" return="org.apache.hadoop.fs.FSDataOutputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Create a new FSDataOutputStreamBuilder for the file with path.
+ Files are overwritten by default.
+
+ @param path file path
+ @return a FSDataOutputStreamBuilder object to build the file
+
+ HADOOP-14384. Temporarily reduce the visibility of method before the
+ builder interface becomes stable.]]>
+      </doc>
+    </method>
+    <method name="appendFile" return="org.apache.hadoop.fs.FSDataOutputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Create a Builder to append a file.
+ @param path file path.
+ @return a {@link FSDataOutputStreamBuilder} to build file append request.]]>
+      </doc>
+    </method>
+    <method name="openFile" return="org.apache.hadoop.fs.FutureDataInputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <doc>
+      <![CDATA[Open a file for reading through a builder API.
+ Ultimately calls {@link #open(Path, int)} unless a subclass
+ executes the open command differently.
+
+ The semantics of this call are therefore the same as that of
+ {@link #open(Path, int)} with one special point: it is in
+ {@code FSDataInputStreamBuilder.build()} in which the open operation
+ takes place -it is there where all preconditions to the operation
+ are checked.
+ @param path file path
+ @return a FSDataInputStreamBuilder object to build the input stream
+ @throws IOException if some early checks cause IO failures.
+ @throws UnsupportedOperationException if support is checked early.]]>
+      </doc>
+    </method>
+    <method name="openFile" return="org.apache.hadoop.fs.FutureDataInputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathHandle" type="org.apache.hadoop.fs.PathHandle"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <doc>
+      <![CDATA[Open a file for reading through a builder API.
+ Ultimately calls {@link #open(PathHandle, int)} unless a subclass
+ executes the open command differently.
+
+ If PathHandles are unsupported, this may fail in the
+ {@code FSDataInputStreamBuilder.build()}  command,
+ rather than in this {@code openFile()} operation.
+ @param pathHandle path handle.
+ @return a FSDataInputStreamBuilder object to build the input stream
+ @throws IOException if some early checks cause IO failures.
+ @throws UnsupportedOperationException if support is checked early.]]>
+      </doc>
+    </method>
+    <method name="openFileWithOptions" return="java.util.concurrent.CompletableFuture"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="parameters" type="org.apache.hadoop.fs.impl.OpenFileParameters"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Execute the actual open file operation.
+
+ This is invoked from {@code FSDataInputStreamBuilder.build()}
+ and from {@link DelegateToFileSystem} and is where
+ the action of opening the file should begin.
+
+ The base implementation performs a blocking
+ call to {@link #open(Path, int)} in this call;
+ the actual outcome is in the returned {@code CompletableFuture}.
+ This avoids having to create some thread pool, while still
+ setting up the expectation that the {@code get()} call
+ is needed to evaluate the result.
+ @param path path to the file
+ @param parameters open file parameters from the builder.
+ @return a future which will evaluate to the opened file.
+ @throws IOException failure to resolve the link.
+ @throws IllegalArgumentException unknown mandatory key]]>
+      </doc>
+    </method>
+    <method name="openFileWithOptions" return="java.util.concurrent.CompletableFuture"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="pathHandle" type="org.apache.hadoop.fs.PathHandle"/>
+      <param name="parameters" type="org.apache.hadoop.fs.impl.OpenFileParameters"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Execute the actual open file operation.
+ The base implementation performs a blocking
+ call to {@link #open(Path, int)} in this call;
+ the actual outcome is in the returned {@code CompletableFuture}.
+ This avoids having to create some thread pool, while still
+ setting up the expectation that the {@code get()} call
+ is needed to evaluate the result.
+ @param pathHandle path to the file
+ @param parameters open file parameters from the builder.
+ @return a future which will evaluate to the opened file.
+ @throws IOException failure to resolve the link.
+ @throws IllegalArgumentException unknown mandatory key
+ @throws UnsupportedOperationException PathHandles are not supported.
+ This may be deferred until the future is evaluated.]]>
+      </doc>
+    </method>
+    <method name="createMultipartUploader" return="org.apache.hadoop.fs.MultipartUploaderBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="basePath" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a multipart uploader.
+ @param basePath file path under which all files are uploaded
+ @return a MultipartUploaderBuilder object to build the uploader
+ @throws IOException if some early checks cause IO failures.
+ @throws UnsupportedOperationException if support is checked early.]]>
+      </doc>
+    </method>
+    <field name="FS_DEFAULT_NAME_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LOG" type="org.apache.commons.logging.Log"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This log is widely used in the org.apache.hadoop.fs code and tests,
+ so must be considered something to only be changed with care.]]>
+      </doc>
+    </field>
+    <field name="SHUTDOWN_HOOK_PRIORITY" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Priority of the FileSystem shutdown hook: {@value}.]]>
+      </doc>
+    </field>
+    <field name="TRASH_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Prefix for trash directory: {@value}.]]>
+      </doc>
+    </field>
+    <field name="USER_HOME_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="statistics" type="org.apache.hadoop.fs.FileSystem.Statistics"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The statistics for this file system.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[An abstract base class for a fairly generic filesystem.  It
+ may be implemented as a distributed filesystem, or as a "local"
+ one that reflects the locally-connected disk.  The local version
+ exists for small Hadoop instances and for testing.
+
+ <p>
+
+ All user code that may potentially use the Hadoop Distributed
+ File System should be written to use a FileSystem object or its
+ successor, {@link FileContext}.
+
+ <p>
+ The local implementation is {@link LocalFileSystem} and distributed
+ implementation is DistributedFileSystem. There are other implementations
+ for object stores and (outside the Apache Hadoop codebase),
+ third party filesystems.
+ <p>
+ Notes
+ <ol>
+ <li>The behaviour of the filesystem is
+ <a href="https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/filesystem/filesystem.html">
+ specified in the Hadoop documentation. </a>
+ However, the normative specification of the behavior of this class is
+ actually HDFS: if HDFS does not behave the way these Javadocs or
+ the specification in the Hadoop documentations define, assume that
+ the documentation is incorrect.
+ </li>
+ <li>The term {@code FileSystem} refers to an instance of this class.</li>
+ <li>The acronym "FS" is used as an abbreviation of FileSystem.</li>
+ <li>The term {@code filesystem} refers to the distributed/local filesystem
+ itself, rather than the class used to interact with it.</li>
+ <li>The term "file" refers to a file in the remote filesystem,
+ rather than instances of {@code java.io.File}.</li>
+ </ol>
+
+ This is a carefully evolving class.
+ New methods may be marked as Unstable or Evolving for their initial release,
+ as a warning that they are new and may change based on the
+ experience of use in applications.
+ <p></p>
+ <b>Important note for developers</b>
+ <p></p>
+ If you are making changes here to the public API or protected methods,
+ you must review the following subclasses and make sure that
+ they are filtering/passing through new methods as appropriate.
+ <p></p>
+
+ {@link FilterFileSystem}: methods are passed through. If not,
+ then {@code TestFilterFileSystem.MustNotImplement} must be
+ updated with the unsupported interface.
+ Furthermore, if the new API's support is probed for via
+ {@link #hasPathCapability(Path, String)} then
+ {@link FilterFileSystem#hasPathCapability(Path, String)}
+ must return false, always.
+ <p></p>
+ {@link ChecksumFileSystem}: checksums are created and
+ verified.
+ <p></p>
+ {@code TestHarFileSystem} will need its {@code MustNotImplement}
+ interface updated.
+ <p></p>
+
+ There are some external places your changes will break things.
+ Do co-ordinate changes here.
+ <p></p>
+
+ HBase: HBoss
+ <p></p>
+ Hive: HiveShim23
+ {@code shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java}]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FileSystem -->
+  <!-- start class org.apache.hadoop.fs.FileUtil -->
+  <class name="FileUtil" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileUtil"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+      <doc>
+      <![CDATA[convert an array of FileStatus to an array of Path
+
+ @param stats
+          an array of FileStatus objects
+ @return an array of paths corresponding to the input]]>
+      </doc>
+    </method>
+    <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[convert an array of FileStatus to an array of Path.
+ If stats if null, return path
+ @param stats
+          an array of FileStatus objects
+ @param path
+          default path to return in stats is null
+ @return an array of paths corresponding to the input]]>
+      </doc>
+    </method>
+    <method name="fullyDeleteOnExit"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="java.io.File"/>
+      <doc>
+      <![CDATA[Register all files recursively to be deleted on exit.
+ @param file File/directory to be deleted]]>
+      </doc>
+    </method>
+    <method name="fullyDelete" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="java.io.File"/>
+      <doc>
+      <![CDATA[Delete a directory and all its contents.  If
+ we return false, the directory may be partially-deleted.
+ (1) If dir is symlink to a file, the symlink is deleted. The file pointed
+     to by the symlink is not deleted.
+ (2) If dir is symlink to a directory, symlink is deleted. The directory
+     pointed to by symlink is not deleted.
+ (3) If dir is a normal file, it is deleted.
+ (4) If dir is a normal directory, then dir and all its contents recursively
+     are deleted.]]>
+      </doc>
+    </method>
+    <method name="fullyDelete" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="java.io.File"/>
+      <param name="tryGrantPermissions" type="boolean"/>
+      <doc>
+      <![CDATA[Delete a directory and all its contents.  If
+ we return false, the directory may be partially-deleted.
+ (1) If dir is symlink to a file, the symlink is deleted. The file pointed
+     to by the symlink is not deleted.
+ (2) If dir is symlink to a directory, symlink is deleted. The directory
+     pointed to by symlink is not deleted.
+ (3) If dir is a normal file, it is deleted.
+ (4) If dir is a normal directory, then dir and all its contents recursively
+     are deleted.
+ @param dir the file or directory to be deleted
+ @param tryGrantPermissions true if permissions should be modified to delete a file.
+ @return true on success false on failure.]]>
+      </doc>
+    </method>
+    <method name="readLink" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="java.io.File"/>
+      <doc>
+      <![CDATA[Returns the target of the given symlink. Returns the empty string if
+ the given path does not refer to a symlink or there is an error
+ accessing the symlink.
+ @param f File representing the symbolic link.
+ @return The target of the symbolic link, empty string on error or if not
+         a symlink.]]>
+      </doc>
+    </method>
+    <method name="fullyDeleteContents" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="java.io.File"/>
+      <doc>
+      <![CDATA[Delete the contents of a directory, not the directory itself.  If
+ we return false, the directory may be partially-deleted.
+ If dir is a symlink to a directory, all the contents of the actual
+ directory pointed to by dir will be deleted.]]>
+      </doc>
+    </method>
+    <method name="fullyDeleteContents" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="java.io.File"/>
+      <param name="tryGrantPermissions" type="boolean"/>
+      <doc>
+      <![CDATA[Delete the contents of a directory, not the directory itself.  If
+ we return false, the directory may be partially-deleted.
+ If dir is a symlink to a directory, all the contents of the actual
+ directory pointed to by dir will be deleted.
+ @param tryGrantPermissions if 'true', try grant +rwx permissions to this
+ and all the underlying directories before trying to delete their contents.]]>
+      </doc>
+    </method>
+    <method name="fullyDelete"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link FileSystem#delete(Path, boolean)}">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="dir" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Recursively delete a directory.
+
+ @param fs {@link FileSystem} on which the path is present
+ @param dir directory to recursively delete
+ @throws IOException
+ @deprecated Use {@link FileSystem#delete(Path, boolean)}]]>
+      </doc>
+    </method>
+    <method name="copy" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="deleteSource" type="boolean"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copy files between FileSystems.]]>
+      </doc>
+    </method>
+    <method name="copy" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+      <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="deleteSource" type="boolean"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="copy" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="deleteSource" type="boolean"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copy files between FileSystems.]]>
+      </doc>
+    </method>
+    <method name="copy" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="srcStatus" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="deleteSource" type="boolean"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copy files between FileSystems.]]>
+      </doc>
+    </method>
+    <method name="copy" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="java.io.File"/>
+      <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="deleteSource" type="boolean"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copy local files to a FileSystem.]]>
+      </doc>
+    </method>
+    <method name="copy" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="java.io.File"/>
+      <param name="deleteSource" type="boolean"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copy FileSystem files to local files.]]>
+      </doc>
+    </method>
+    <method name="makeShellPath" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filename" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Convert an os-native filename to a path that works for the shell.
+ @param filename The filename to convert
+ @return The unix pathname
+ @throws IOException on windows, there can be problems with the subprocess]]>
+      </doc>
+    </method>
+    <method name="makeShellPath" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Convert an os-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @return The unix pathname
+ @throws IOException on windows, there can be problems with the subprocess]]>
+      </doc>
+    </method>
+    <method name="makeSecureShellPath" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Convert an os-native filename to a path that works for the shell
+ and avoids script injection attacks.
+ @param file The filename to convert
+ @return The unix pathname
+ @throws IOException on windows, there can be problems with the subprocess]]>
+      </doc>
+    </method>
+    <method name="makeShellPath" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="java.io.File"/>
+      <param name="makeCanonicalPath" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Convert an os-native filename to a path that works for the shell.
+ @param file The filename to convert
+ @param makeCanonicalPath
+          Whether to make canonical path for the file passed
+ @return The unix pathname
+ @throws IOException on windows, there can be problems with the subprocess]]>
+      </doc>
+    </method>
+    <method name="getDU" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="java.io.File"/>
+      <doc>
+      <![CDATA[Takes an input dir and returns the du on that local directory. Very basic
+ implementation.
+
+ @param dir
+          The input dir to get the disk space of this local dir
+ @return The total disk space of the input local directory]]>
+      </doc>
+    </method>
+    <method name="unZip"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="inputStream" type="java.io.InputStream"/>
+      <param name="toDir" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Given a stream input it will unzip it in the unzip directory
+ passed as the second parameter.
+ @param inputStream The zip file as input
+ @param toDir The unzip directory where to unzip the zip file.
+ @throws IOException an exception occurred]]>
+      </doc>
+    </method>
+    <method name="unZip"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="inFile" type="java.io.File"/>
+      <param name="unzipDir" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Given a File input it will unzip it in the unzip directory
+ passed as the second parameter.
+ @param inFile The zip file as input
+ @param unzipDir The unzip directory where to unzip the zip file.
+ @throws IOException An I/O exception has occurred]]>
+      </doc>
+    </method>
+    <method name="unTar"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="inputStream" type="java.io.InputStream"/>
+      <param name="untarDir" type="java.io.File"/>
+      <param name="gzipped" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <exception name="ExecutionException" type="java.util.concurrent.ExecutionException"/>
+      <doc>
+      <![CDATA[Given a Tar File as input it will untar the file in the untar directory
+ passed as the second parameter
+
+ This utility will untar ".tar" files and ".tar.gz","tgz" files.
+
+ @param inputStream The tar file as input.
+ @param untarDir The untar directory where to untar the tar file.
+ @param gzipped The input stream is gzipped
+                TODO Use magic number and PushbackInputStream to identify
+ @throws IOException an exception occurred
+ @throws InterruptedException command interrupted
+ @throws ExecutionException task submit failed]]>
+      </doc>
+    </method>
+    <method name="unTar"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="inFile" type="java.io.File"/>
+      <param name="untarDir" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Given a Tar File as input it will untar the file in the untar directory
+ passed as the second parameter
+
+ This utility will untar ".tar" files and ".tar.gz","tgz" files.
+
+ @param inFile The tar file as input.
+ @param untarDir The untar directory where to untar the tar file.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="symLink" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="target" type="java.lang.String"/>
+      <param name="linkname" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a soft link between a src and destination
+ only on a local disk. HDFS does not support this.
+ On Windows, when symlink creation fails due to security
+ setting, we will log a warning. The return code in this
+ case is 2.
+
+ @param target the target for symlink
+ @param linkname the symlink
+ @return 0 on success]]>
+      </doc>
+    </method>
+    <method name="chmod" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filename" type="java.lang.String"/>
+      <param name="perm" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Change the permissions on a filename.
+ @param filename the name of the file to change
+ @param perm the permission string
+ @return the exit code from the command
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="chmod" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filename" type="java.lang.String"/>
+      <param name="perm" type="java.lang.String"/>
+      <param name="recursive" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Change the permissions on a file / directory, recursively, if
+ needed.
+ @param filename name of the file whose permissions are to change
+ @param perm permission string
+ @param recursive true, if permissions should be changed recursively
+ @return the exit code from the command.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setOwner"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="java.io.File"/>
+      <param name="username" type="java.lang.String"/>
+      <param name="groupname" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the ownership on a file / directory. User name and group name
+ cannot both be null.
+ @param file the file to change
+ @param username the new user owner name
+ @param groupname the new group owner name
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setReadable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="java.io.File"/>
+      <param name="readable" type="boolean"/>
+      <doc>
+      <![CDATA[Platform independent implementation for {@link File#setReadable(boolean)}
+ File#setReadable does not work as expected on Windows.
+ @param f input file
+ @param readable
+ @return true on success, false otherwise]]>
+      </doc>
+    </method>
+    <method name="setWritable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="java.io.File"/>
+      <param name="writable" type="boolean"/>
+      <doc>
+      <![CDATA[Platform independent implementation for {@link File#setWritable(boolean)}
+ File#setWritable does not work as expected on Windows.
+ @param f input file
+ @param writable
+ @return true on success, false otherwise]]>
+      </doc>
+    </method>
+    <method name="setExecutable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="java.io.File"/>
+      <param name="executable" type="boolean"/>
+      <doc>
+      <![CDATA[Platform independent implementation for {@link File#setExecutable(boolean)}
+ File#setExecutable does not work as expected on Windows.
+ Note: revoking execute permission on folders does not have the same
+ behavior on Windows as on Unix platforms. Creating, deleting or renaming
+ a file within that folder will still succeed on Windows.
+ @param f input file
+ @param executable
+ @return true on success, false otherwise]]>
+      </doc>
+    </method>
+    <method name="canRead" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="java.io.File"/>
+      <doc>
+      <![CDATA[Platform independent implementation for {@link File#canRead()}
+ @param f input file
+ @return On Unix, same as {@link File#canRead()}
+         On Windows, true if process has read access on the path]]>
+      </doc>
+    </method>
+    <method name="canWrite" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="java.io.File"/>
+      <doc>
+      <![CDATA[Platform independent implementation for {@link File#canWrite()}
+ @param f input file
+ @return On Unix, same as {@link File#canWrite()}
+         On Windows, true if process has write access on the path]]>
+      </doc>
+    </method>
+    <method name="canExecute" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="java.io.File"/>
+      <doc>
+      <![CDATA[Platform independent implementation for {@link File#canExecute()}
+ @param f input file
+ @return On Unix, same as {@link File#canExecute()}
+         On Windows, true if process has execute access on the path]]>
+      </doc>
+    </method>
+    <method name="setPermission"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="java.io.File"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set permissions to the required value. Uses the java primitives instead
+ of forking if group == other.
+ @param f the file to change
+ @param permission the new permissions
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createLocalTempFile" return="java.io.File"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="basefile" type="java.io.File"/>
+      <param name="prefix" type="java.lang.String"/>
+      <param name="isDeleteOnExit" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a tmp file for a base file.
+ @param basefile the base file of the tmp
+ @param prefix file name prefix of tmp
+ @param isDeleteOnExit if true, the tmp will be deleted when the VM exits
+ @return a newly created tmp file
+ @exception IOException If a tmp file cannot be created
+ @see java.io.File#createTempFile(String, String, File)
+ @see java.io.File#deleteOnExit()]]>
+      </doc>
+    </method>
+    <method name="replaceFile"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="java.io.File"/>
+      <param name="target" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Move the src file to the name specified by target.
+ @param src the source file
+ @param target the target file
+ @exception IOException If this operation fails]]>
+      </doc>
+    </method>
+    <method name="listFiles" return="java.io.File[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[A wrapper for {@link File#listFiles()}. This java.io API returns null
+ when a dir is not a directory or for any I/O error. Instead of having
+ null check everywhere File#listFiles() is used, we will add utility API
+ to get around this problem. For the majority of cases where we prefer
+ an IOException to be thrown.
+ @param dir directory for which listing should be performed
+ @return list of files or empty list
+ @exception IOException for invalid directory or for a bad disk.]]>
+      </doc>
+    </method>
+    <method name="list" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[A wrapper for {@link File#list()}. This java.io API returns null
+ when a dir is not a directory or for any I/O error. Instead of having
+ null check everywhere File#list() is used, we will add utility API
+ to get around this problem. For the majority of cases where we prefer
+ an IOException to be thrown.
+ @param dir directory for which listing should be performed
+ @return list of file names or empty string list
+ @exception AccessDeniedException for unreadable directory
+ @exception IOException for invalid directory or for bad disk]]>
+      </doc>
+    </method>
+    <method name="createJarWithClassPath" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="inputClassPath" type="java.lang.String"/>
+      <param name="pwd" type="org.apache.hadoop.fs.Path"/>
+      <param name="callerEnv" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createJarWithClassPath" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="inputClassPath" type="java.lang.String"/>
+      <param name="pwd" type="org.apache.hadoop.fs.Path"/>
+      <param name="targetDir" type="org.apache.hadoop.fs.Path"/>
+      <param name="callerEnv" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a jar file at the given path, containing a manifest with a classpath
+ that references all specified entries.
+
+ Some platforms may have an upper limit on command line length.  For example,
+ the maximum command line length on Windows is 8191 characters, but the
+ length of the classpath may exceed this.  To work around this limitation,
+ use this method to create a small intermediate jar with a manifest that
+ contains the full classpath.  It returns the absolute path to the new jar,
+ which the caller may set as the classpath for a new process.
+
+ Environment variable evaluation is not supported within a jar manifest, so
+ this method expands environment variables before inserting classpath entries
+ to the manifest.  The method parses environment variables according to
+ platform-specific syntax (%VAR% on Windows, or $VAR otherwise).  On Windows,
+ environment variables are case-insensitive.  For example, %VAR% and %var%
+ evaluate to the same value.
+
+ Specifying the classpath in a jar manifest does not support wildcards, so
+ this method expands wildcards internally.  Any classpath entry that ends
+ with * is translated to all files at that path with extension .jar or .JAR.
+
+ @param inputClassPath String input classpath to bundle into the jar manifest
+ @param pwd Path to working directory to save jar
+ @param targetDir path to where the jar execution will have its working dir
+ @param callerEnv Map {@literal <}String, String{@literal >} caller's
+ environment variables to use for expansion
+ @return String[] with absolute path to new jar in position 0 and
+   unexpanded wild card entry path in position 1
+ @throws IOException if there is an I/O error while writing the jar file]]>
+      </doc>
+    </method>
+    <method name="getJarsInDirectory" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns all jars that are in the directory. It is useful in expanding a
+ wildcard path to return all jars from the directory to use in a classpath.
+ It operates only on local paths.
+
+ @param path the path to the directory. The path may include the wildcard.
+ @return the list of jars as URLs, or an empty list if there are no jars, or
+ the directory does not exist locally]]>
+      </doc>
+    </method>
+    <method name="getJarsInDirectory" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="java.lang.String"/>
+      <param name="useLocal" type="boolean"/>
+      <doc>
+      <![CDATA[Returns all jars that are in the directory. It is useful in expanding a
+ wildcard path to return all jars from the directory to use in a classpath.
+
+ @param path the path to the directory. The path may include the wildcard.
+ @return the list of jars as URLs, or an empty list if there are no jars, or
+ the directory does not exist]]>
+      </doc>
+    </method>
+    <method name="compareFs" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="srcFs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="destFs" type="org.apache.hadoop.fs.FileSystem"/>
+    </method>
+    <method name="write" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="bytes" type="byte[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes bytes to a file. This utility method opens the file for writing,
+ creating the file if it does not exist, or overwrites an existing file. All
+ bytes in the byte array are written to the file.
+
+ @param fs the file system with which to create the file
+ @param path the path to the file
+ @param bytes the byte array with the bytes to write
+
+ @return the file system
+
+ @throws NullPointerException if any of the arguments are {@code null}
+ @throws IOException if an I/O error occurs creating or writing to the file]]>
+      </doc>
+    </method>
+    <method name="write" return="org.apache.hadoop.fs.FileContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fileContext" type="org.apache.hadoop.fs.FileContext"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="bytes" type="byte[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes bytes to a file. This utility method opens the file for writing,
+ creating the file if it does not exist, or overwrites an existing file. All
+ bytes in the byte array are written to the file.
+
+ @param fileContext the file context with which to create the file
+ @param path the path to the file
+ @param bytes the byte array with the bytes to write
+
+ @return the file context
+
+ @throws NullPointerException if any of the arguments are {@code null}
+ @throws IOException if an I/O error occurs creating or writing to the file]]>
+      </doc>
+    </method>
+    <method name="write" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="lines" type="java.lang.Iterable"/>
+      <param name="cs" type="java.nio.charset.Charset"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write lines of text to a file. Each line is a char sequence and is written
+ to the file in sequence with each line terminated by the platform's line
+ separator, as defined by the system property {@code
+ line.separator}. Characters are encoded into bytes using the specified
+ charset. This utility method opens the file for writing, creating the file
+ if it does not exist, or overwrites an existing file.
+
+ @param fs the file system with which to create the file
+ @param path the path to the file
+ @param lines a Collection to iterate over the char sequences
+ @param cs the charset to use for encoding
+
+ @return the file system
+
+ @throws NullPointerException if any of the arguments are {@code null}
+ @throws IOException if an I/O error occurs creating or writing to the file]]>
+      </doc>
+    </method>
+    <method name="write" return="org.apache.hadoop.fs.FileContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fileContext" type="org.apache.hadoop.fs.FileContext"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="lines" type="java.lang.Iterable"/>
+      <param name="cs" type="java.nio.charset.Charset"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write lines of text to a file. Each line is a char sequence and is written
+ to the file in sequence with each line terminated by the platform's line
+ separator, as defined by the system property {@code
+ line.separator}. Characters are encoded into bytes using the specified
+ charset. This utility method opens the file for writing, creating the file
+ if it does not exist, or overwrites an existing file.
+
+ @param fileContext the file context with which to create the file
+ @param path the path to the file
+ @param lines a Collection to iterate over the char sequences
+ @param cs the charset to use for encoding
+
+ @return the file context
+
+ @throws NullPointerException if any of the arguments are {@code null}
+ @throws IOException if an I/O error occurs creating or writing to the file]]>
+      </doc>
+    </method>
+    <method name="write" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="charseq" type="java.lang.CharSequence"/>
+      <param name="cs" type="java.nio.charset.Charset"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write a line of text to a file. Characters are encoded into bytes using the
+ specified charset. This utility method opens the file for writing, creating
+ the file if it does not exist, or overwrites an existing file.
+
+ @param fs the file system with which to create the file
+ @param path the path to the file
+ @param charseq the char sequence to write to the file
+ @param cs the charset to use for encoding
+
+ @return the file system
+
+ @throws NullPointerException if any of the arguments are {@code null}
+ @throws IOException if an I/O error occurs creating or writing to the file]]>
+      </doc>
+    </method>
+    <method name="write" return="org.apache.hadoop.fs.FileContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileContext"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="charseq" type="java.lang.CharSequence"/>
+      <param name="cs" type="java.nio.charset.Charset"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write a line of text to a file. Characters are encoded into bytes using the
+ specified charset. This utility method opens the file for writing, creating
+ the file if it does not exist, or overwrites an existing file.
+
+ @param fs the file context with which to create the file
+ @param path the path to the file
+ @param charseq the char sequence to write to the file
+ @param cs the charset to use for encoding
+
+ @return the file context
+
+ @throws NullPointerException if any of the arguments are {@code null}
+ @throws IOException if an I/O error occurs creating or writing to the file]]>
+      </doc>
+    </method>
+    <method name="write" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="charseq" type="java.lang.CharSequence"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write a line of text to a file. Characters are encoded into bytes using
+ UTF-8. This utility method opens the file for writing, creating the file if
+ it does not exist, or overwrites an existing file.
+
+ @param fs the file system with which to create the file
+ @param path the path to the file
+ @param charseq the char sequence to write to the file
+
+ @return the file system
+
+ @throws NullPointerException if any of the arguments are {@code null}
+ @throws IOException if an I/O error occurs creating or writing to the file]]>
+      </doc>
+    </method>
+    <method name="write" return="org.apache.hadoop.fs.FileContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fileContext" type="org.apache.hadoop.fs.FileContext"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="charseq" type="java.lang.CharSequence"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write a line of text to a file. Characters are encoded into bytes using
+ UTF-8. This utility method opens the file for writing, creating the file if
+ it does not exist, or overwrites an existing file.
+
+ @param fileContext the file context with which to create the file
+ @param path the path to the file
+ @param charseq the char sequence to write to the file
+
+ @return the file context
+
+ @throws NullPointerException if any of the arguments are {@code null}
+ @throws IOException if an I/O error occurs creating or writing to the file]]>
+      </doc>
+    </method>
+    <field name="SYMLINK_NO_PRIVILEGE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A collection of file-processing util methods]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FileUtil -->
+  <!-- start class org.apache.hadoop.fs.FilterFileSystem -->
+  <class name="FilterFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FilterFileSystem"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FilterFileSystem" type="org.apache.hadoop.fs.FileSystem"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRawFileSystem" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the raw file system 
+ @return FileSystem being filtered]]>
+      </doc>
+    </method>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Called after a new FileSystem instance is constructed.
+ @param name a uri whose authority section names the host, port, etc.
+   for this FileSystem
+ @param conf the configuration]]>
+      </doc>
+    </method>
+    <method name="getUri" return="java.net.URI"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]>
+      </doc>
+    </method>
+    <method name="getCanonicalUri" return="java.net.URI"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="canonicalizeUri" return="java.net.URI"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+    </method>
+    <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Make sure that a path specifies a FileSystem.]]>
+      </doc>
+    </method>
+    <method name="checkPath"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Check that a Path belongs to this FileSystem.]]>
+      </doc>
+    </method>
+    <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="start" type="long"/>
+      <param name="len" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="resolvePath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Opens an FSDataInputStream at the indicated Path.
+ @param f the file name to open
+ @param bufferSize the size of the buffer to be used.]]>
+      </doc>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fd" type="org.apache.hadoop.fs.PathHandle"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createPathHandle" return="org.apache.hadoop.fs.PathHandle"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="opts" type="org.apache.hadoop.fs.Options.HandleOpt[]"/>
+    </method>
+    <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="concat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="psrcs" type="org.apache.hadoop.fs.Path[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="flags" type="java.util.EnumSet"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <param name="checksumOpt" type="org.apache.hadoop.fs.Options.ChecksumOpt"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="listLocatedStatus" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="filter" type="org.apache.hadoop.fs.PathFilter"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createNonRecursive" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="flags" type="java.util.EnumSet"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setReplication" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="replication" type="short"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set replication for an existing file.
+ 
+ @param src file name
+ @param replication new replication
+ @throws IOException
+ @return true if successful;
+         false if file does not exist or is a directory]]>
+      </doc>
+    </method>
+    <method name="rename" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Renames Path src to Path dst.  Can take place on local fs
+ or remote DFS.]]>
+      </doc>
+    </method>
+    <method name="rename"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="options" type="org.apache.hadoop.fs.Options.Rename[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="truncate" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="newLength" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="delete" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="recursive" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete a file]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List files in a directory.]]>
+      </doc>
+    </method>
+    <method name="listCorruptFileBlocks" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="listLocatedStatus" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List files and its block locations in a directory.]]>
+      </doc>
+    </method>
+    <method name="listStatusIterator" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a remote iterator for listing in a directory]]>
+      </doc>
+    </method>
+    <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setWorkingDirectory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Set the current working directory for the given file system. All relative
+ paths will be resolved relative to it.
+ 
+ @param newDir the new working directory
+      </doc>
+    </method>
+    <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current working directory for the given file system
+ 
+ @return the directory pathname]]>
+      </doc>
+    </method>
+    <method name="getInitialWorkingDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStatus" return="org.apache.hadoop.fs.FsStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="mkdirs" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="mkdirs" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="copyFromLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src file is on the local disk.  Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+      </doc>
+    </method>
+    <method name="copyFromLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="srcs" type="org.apache.hadoop.fs.Path[]"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src files are on the local disk.  Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+      </doc>
+    </method>
+    <method name="copyFromLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src file is on the local disk.  Add it to FS at
+ the given dst name.
+ delSrc indicates if the source should be removed]]>
+      </doc>
+    </method>
+    <method name="copyToLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The src file is under FS, and the dst is on the local disk.
+ Copy it from FS control to the local dst name.
+ delSrc indicates if the src will be removed or not.]]>
+      </doc>
+    </method>
+    <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+      <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns a local File that the user can write output to.  The caller
+ provides both the eventual FS target name and the local working
+ file.  If the FS is local, we write directly into the target.  If
+ the FS is remote, we write into the tmp local area.]]>
+      </doc>
+    </method>
+    <method name="completeLocalOutput"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+      <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Called when we're all done writing to the target.  A local FS will
+ do nothing, because we've written to exactly the right place.  A remote
+ FS will copy the contents of tmpLocalFile to the correct target at
+ fsOutputFile.]]>
+      </doc>
+    </method>
+    <method name="getUsed" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the total size of all files in the filesystem.]]>
+      </doc>
+    </method>
+    <method name="getUsed" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the total size of all files from a specified path.]]>
+      </doc>
+    </method>
+    <method name="getDefaultBlockSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDefaultReplication" return="short"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getDefaultBlockSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="getDefaultReplication" return="short"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="getServerDefaults" return="org.apache.hadoop.fs.FsServerDefaults"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get file status.]]>
+      </doc>
+    </method>
+    <method name="msync"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+    </method>
+    <method name="access"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="mode" type="org.apache.hadoop.fs.permission.FsAction"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createSymlink"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="target" type="org.apache.hadoop.fs.Path"/>
+      <param name="link" type="org.apache.hadoop.fs.Path"/>
+      <param name="createParent" type="boolean"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.fs.FileAlreadyExistsException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="ParentNotDirectoryException" type="org.apache.hadoop.fs.ParentNotDirectoryException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getFileLinkStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="UnsupportedFileSystemException" type="org.apache.hadoop.fs.UnsupportedFileSystemException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="supportsSymlinks" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLinkTarget" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="resolveLink" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="length" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setVerifyChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="verifyChecksum" type="boolean"/>
+    </method>
+    <method name="setWriteChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="writeChecksum" type="boolean"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setOwner"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="username" type="java.lang.String"/>
+      <param name="groupname" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setTimes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="mtime" type="long"/>
+      <param name="atime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setPermission"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="primitiveCreate" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="absolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="flag" type="java.util.EnumSet"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <param name="checksumOpt" type="org.apache.hadoop.fs.Options.ChecksumOpt"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="primitiveMkdir" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="abdolutePermission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getChildFileSystems" return="org.apache.hadoop.fs.FileSystem[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createSnapshot" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="snapshotName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="renameSnapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="snapshotOldName" type="java.lang.String"/>
+      <param name="snapshotNewName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="deleteSnapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="snapshotName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="modifyAclEntries"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="removeAclEntries"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="removeDefaultAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="removeAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setAcl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="aclSpec" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getAclStatus" return="org.apache.hadoop.fs.permission.AclStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setXAttr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="byte[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setXAttr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="byte[]"/>
+      <param name="flag" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getXAttr" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getXAttrs" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getXAttrs" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="names" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="listXAttrs" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="removeXAttr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="satisfyStoragePolicy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setStoragePolicy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="policyName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="unsetStoragePolicy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getStoragePolicy" return="org.apache.hadoop.fs.BlockStoragePolicySpi"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getAllStoragePolicies" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getTrashRoot" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="getTrashRoots" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allUsers" type="boolean"/>
+    </method>
+    <method name="createFile" return="org.apache.hadoop.fs.FSDataOutputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="appendFile" return="org.apache.hadoop.fs.FSDataOutputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="openFile" return="org.apache.hadoop.fs.FutureDataInputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+    </method>
+    <method name="openFile" return="org.apache.hadoop.fs.FutureDataInputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathHandle" type="org.apache.hadoop.fs.PathHandle"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+    </method>
+    <method name="openFileWithOptions" return="java.util.concurrent.CompletableFuture"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="parameters" type="org.apache.hadoop.fs.impl.OpenFileParameters"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="openFileWithOptions" return="java.util.concurrent.CompletableFuture"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="pathHandle" type="org.apache.hadoop.fs.PathHandle"/>
+      <param name="parameters" type="org.apache.hadoop.fs.impl.OpenFileParameters"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="hasPathCapability" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="capability" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="swapScheme" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A <code>FilterFileSystem</code> contains
+ some other file system, which it uses as
+ its  basic file system, possibly transforming
+ the data along the way or providing  additional
+ functionality. The class <code>FilterFileSystem</code>
+ itself simply overrides all  methods of
+ <code>FileSystem</code> with versions that
+ pass all requests to the contained  file
+ system. Subclasses of <code>FilterFileSystem</code>
+ may further override some of  these methods
+ and may also provide additional methods
+ and fields.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FilterFileSystem -->
+  <!-- start interface org.apache.hadoop.fs.FSBuilder -->
+  <interface name="FSBuilder"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="opt" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set optional Builder parameter.]]>
+      </doc>
+    </method>
+    <method name="opt" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set optional boolean parameter for the Builder.
+
+ @see #opt(String, String)]]>
+      </doc>
+    </method>
+    <method name="opt" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="int"/>
+      <doc>
+      <![CDATA[Set optional int parameter for the Builder.
+
+ @see #opt(String, String)]]>
+      </doc>
+    </method>
+    <method name="opt" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="float"/>
+      <doc>
+      <![CDATA[Set optional float parameter for the Builder.
+
+ @see #opt(String, String)]]>
+      </doc>
+    </method>
+    <method name="opt" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="double"/>
+      <doc>
+      <![CDATA[Set optional double parameter for the Builder.
+
+ @see #opt(String, String)]]>
+      </doc>
+    </method>
+    <method name="opt" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="values" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Set an array of string values as optional parameter for the Builder.
+
+ @see #opt(String, String)]]>
+      </doc>
+    </method>
+    <method name="must" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set mandatory option to the Builder.
+
+ If the option is not supported or unavailable,
+ the client should expect {@link #build()} throws IllegalArgumentException.]]>
+      </doc>
+    </method>
+    <method name="must" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set mandatory boolean option.
+
+ @see #must(String, String)]]>
+      </doc>
+    </method>
+    <method name="must" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="int"/>
+      <doc>
+      <![CDATA[Set mandatory int option.
+
+ @see #must(String, String)]]>
+      </doc>
+    </method>
+    <method name="must" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="float"/>
+      <doc>
+      <![CDATA[Set mandatory float option.
+
+ @see #must(String, String)]]>
+      </doc>
+    </method>
+    <method name="must" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="double"/>
+      <doc>
+      <![CDATA[Set mandatory double option.
+
+ @see #must(String, String)]]>
+      </doc>
+    </method>
+    <method name="must" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="values" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Set a string array as mandatory option.
+
+ @see #must(String, String)]]>
+      </doc>
+    </method>
+    <method name="build" return="S"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Instantiate the object which was being built.
+
+ @throws IllegalArgumentException if the parameters are not valid.
+ @throws UnsupportedOperationException if the filesystem does not support
+ the specific operation.
+ @throws IOException on filesystem IO errors.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The base interface which various FileSystem FileContext Builder
+ interfaces can extend, and which underlying implementations
+ will then implement.
+ @param <S> Return type on the {@link #build()} call.
+ @param <B> type of builder itself.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.FSBuilder -->
+  <!-- start interface org.apache.hadoop.fs.FsConstants -->
+  <interface name="FsConstants"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <field name="LOCAL_FS_URI" type="java.net.URI"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FTP_SCHEME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAX_PATH_LINKS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="VIEWFS_URI" type="java.net.URI"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[ViewFs: viewFs file system (ie the mount file system on client side)]]>
+      </doc>
+    </field>
+    <field name="VIEWFS_SCHEME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_VIEWFS_OVERLOAD_SCHEME_TARGET_FS_IMPL_PATTERN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="VIEWFS_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[FileSystem related constants.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.FsConstants -->
+  <!-- start class org.apache.hadoop.fs.FSDataInputStream -->
+  <class name="FSDataInputStream" extends="java.io.DataInputStream"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.fs.Seekable"/>
+    <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+    <implements name="org.apache.hadoop.fs.ByteBufferReadable"/>
+    <implements name="org.apache.hadoop.fs.HasFileDescriptor"/>
+    <implements name="org.apache.hadoop.fs.CanSetDropBehind"/>
+    <implements name="org.apache.hadoop.fs.CanSetReadahead"/>
+    <implements name="org.apache.hadoop.fs.HasEnhancedByteBufferAccess"/>
+    <implements name="org.apache.hadoop.fs.CanUnbuffer"/>
+    <implements name="org.apache.hadoop.fs.StreamCapabilities"/>
+    <implements name="org.apache.hadoop.fs.ByteBufferPositionedReadable"/>
+    <implements name="org.apache.hadoop.fs.statistics.IOStatisticsSource"/>
+    <constructor name="FSDataInputStream" type="java.io.InputStream"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="seek"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="desired" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Seek to the given offset.
+
+ @param desired offset to seek to]]>
+      </doc>
+    </method>
+    <method name="getPos" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the current position in the input stream.
+
+ @return current position in the input stream]]>
+      </doc>
+    </method>
+    <method name="read" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buffer" type="byte[]"/>
+      <param name="offset" type="int"/>
+      <param name="length" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read bytes from the given position in the stream to the given buffer.
+
+ @param position  position in the input stream to seek
+ @param buffer    buffer into which data is read
+ @param offset    offset into the buffer in which data is written
+ @param length    maximum number of bytes to read
+ @return total number of bytes read into the buffer, or <code>-1</code>
+         if there is no more data because the end of the stream has been
+         reached]]>
+      </doc>
+    </method>
+    <method name="readFully"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buffer" type="byte[]"/>
+      <param name="offset" type="int"/>
+      <param name="length" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read bytes from the given position in the stream to the given buffer.
+ Continues to read until <code>length</code> bytes have been read.
+
+ @param position  position in the input stream to seek
+ @param buffer    buffer into which data is read
+ @param offset    offset into the buffer in which data is written
+ @param length    the number of bytes to read
+ @throws IOException IO problems
+ @throws EOFException If the end of stream is reached while reading.
+                      If an exception is thrown an undetermined number
+                      of bytes in the buffer may have been written.]]>
+      </doc>
+    </method>
+    <method name="readFully"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buffer" type="byte[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[See {@link #readFully(long, byte[], int, int)}.]]>
+      </doc>
+    </method>
+    <method name="seekToNewSource" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="targetPos" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Seek to the given position on an alternate copy of the data.
+
+ @param  targetPos  position to seek to
+ @return true if a new source is found, false otherwise]]>
+      </doc>
+    </method>
+    <method name="read" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="buf" type="java.nio.ByteBuffer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getFileDescriptor" return="java.io.FileDescriptor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setReadahead"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="readahead" type="java.lang.Long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+    </method>
+    <method name="setDropBehind"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dropBehind" type="java.lang.Boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+    </method>
+    <method name="read" return="java.nio.ByteBuffer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bufferPool" type="org.apache.hadoop.io.ByteBufferPool"/>
+      <param name="maxLength" type="int"/>
+      <param name="opts" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+    </method>
+    <method name="read" return="java.nio.ByteBuffer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="bufferPool" type="org.apache.hadoop.io.ByteBufferPool"/>
+      <param name="maxLength" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+    </method>
+    <method name="releaseBuffer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="buffer" type="java.nio.ByteBuffer"/>
+    </method>
+    <method name="unbuffer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hasCapability" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="capability" type="java.lang.String"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[String value. Includes the string value of the inner stream
+ @return the stream]]>
+      </doc>
+    </method>
+    <method name="read" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buf" type="java.nio.ByteBuffer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFully"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buf" type="java.nio.ByteBuffer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getIOStatistics" return="org.apache.hadoop.fs.statistics.IOStatistics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the IO Statistics of the nested stream, falling back to
+ null if the stream does not implement the interface
+ {@link IOStatisticsSource}.
+ @return an IOStatistics instance or null]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
+ and buffers input through a {@link java.io.BufferedInputStream}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FSDataInputStream -->
+  <!-- start class org.apache.hadoop.fs.FSDataOutputStream -->
+  <class name="FSDataOutputStream" extends="java.io.DataOutputStream"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.fs.Syncable"/>
+    <implements name="org.apache.hadoop.fs.CanSetDropBehind"/>
+    <implements name="org.apache.hadoop.fs.StreamCapabilities"/>
+    <implements name="org.apache.hadoop.fs.statistics.IOStatisticsSource"/>
+    <implements name="org.apache.hadoop.fs.Abortable"/>
+    <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getPos" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current position in the output stream.
+
+ @return the current position in the output stream]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close the underlying output stream.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hasCapability" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="capability" type="java.lang.String"/>
+    </method>
+    <method name="hflush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="hsync"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setDropBehind"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dropBehind" type="java.lang.Boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getIOStatistics" return="org.apache.hadoop.fs.statistics.IOStatistics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the IO Statistics of the nested stream, falling back to
+ empty statistics if the stream does not implement the interface
+ {@link IOStatisticsSource}.
+ @return an IOStatistics instance.]]>
+      </doc>
+    </method>
+    <method name="abort" return="org.apache.hadoop.fs.Abortable.AbortableResult"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Invoke {@code abort()} on the wrapped stream if it
+ is Abortable, otherwise raise an
+ {@code UnsupportedOperationException}.
+ @throws UnsupportedOperationException if not available.
+ @return the result.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Utility that wraps a {@link OutputStream} in a {@link DataOutputStream}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FSDataOutputStream -->
+  <!-- start class org.apache.hadoop.fs.FSDataOutputStreamBuilder -->
+  <class name="FSDataOutputStreamBuilder" extends="org.apache.hadoop.fs.impl.AbstractFSBuilderImpl"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FSDataOutputStreamBuilder" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor.]]>
+      </doc>
+    </constructor>
+    <method name="getThisBuilder" return="B"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the concrete implementation of the builder instance.]]>
+      </doc>
+    </method>
+    <method name="getFS" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="permission" return="B"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="perm" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <doc>
+      <![CDATA[Set permission for the file.]]>
+      </doc>
+    </method>
+    <method name="getBufferSize" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="bufferSize" return="B"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bufSize" type="int"/>
+      <doc>
+      <![CDATA[Set the size of the buffer to be used.]]>
+      </doc>
+    </method>
+    <method name="getReplication" return="short"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="replication" return="B"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="replica" type="short"/>
+      <doc>
+      <![CDATA[Set replication factor.]]>
+      </doc>
+    </method>
+    <method name="getBlockSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="blockSize" return="B"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blkSize" type="long"/>
+      <doc>
+      <![CDATA[Set block size.]]>
+      </doc>
+    </method>
+    <method name="isRecursive" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return true to create the parent directories if they do not exist.]]>
+      </doc>
+    </method>
+    <method name="recursive" return="B"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create the parent directories if they do not exist.]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="org.apache.hadoop.util.Progressable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="progress" return="B"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="prog" type="org.apache.hadoop.util.Progressable"/>
+      <doc>
+      <![CDATA[Set the facility of reporting progress.]]>
+      </doc>
+    </method>
+    <method name="getFlags" return="java.util.EnumSet"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="create" return="B"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an FSDataOutputStream at the specified path.]]>
+      </doc>
+    </method>
+    <method name="overwrite" return="B"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="overwrite" type="boolean"/>
+      <doc>
+      <![CDATA[Set to true to overwrite the existing file.
+ If set to false, an exception will be thrown when calling {@link #build()}
+ if the file exists.]]>
+      </doc>
+    </method>
+    <method name="append" return="B"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Append to an existing file (optional operation).]]>
+      </doc>
+    </method>
+    <method name="getChecksumOpt" return="org.apache.hadoop.fs.Options.ChecksumOpt"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="checksumOpt" return="B"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="chksumOpt" type="org.apache.hadoop.fs.Options.ChecksumOpt"/>
+      <doc>
+      <![CDATA[Set checksum opt.]]>
+      </doc>
+    </method>
+    <method name="build" return="S"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create the FSDataOutputStream to write on the file system.
+
+ @throws IllegalArgumentException if the parameters are not valid.
+ @throws IOException on errors when file system creates or appends the file.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Builder for {@link FSDataOutputStream} and its subclasses.
+
+ It is used to create {@link FSDataOutputStream} when creating a new file or
+ appending an existing file on {@link FileSystem}.
+
+ By default, it does not create parent directories that do not exist.
+ {@link FileSystem#createNonRecursive(Path, boolean, int, short, long,
+ Progressable)}.
+
+ To create missing parent directories, use {@link #recursive()}.
+
+ To be more generic, {@link #opt(String, int)} and {@link #must(String, int)}
+ variants provide implementation-agnostic way to customize the builder.
+ Each FS-specific builder implementation can interpret the FS-specific
+ options accordingly, for example:
+
+ <code>
+
+ // Don't
+ if (fs instanceof FooFileSystem) {
+   FooFileSystem fs = (FooFileSystem) fs;
+   OutputStream out = dfs.createFile(path)
+     .optionA()
+     .optionB("value")
+     .cache()
+   .build()
+ } else if (fs instanceof BarFileSystem) {
+   ...
+ }
+
+ // Do
+ OutputStream out = fs.createFile(path)
+   .permission(perm)
+   .bufferSize(bufSize)
+   .opt("foofs:option.a", true)
+   .opt("foofs:option.b", "value")
+   .opt("barfs:cache", true)
+   .must("foofs:cache", true)
+   .must("barfs:cache-size", 256 * 1024 * 1024)
+   .build();
+ </code>
+
+ If the option is not related to the file system, the option will be ignored.
+ If the option is must, but not supported by the file system, a
+ {@link IllegalArgumentException} will be thrown.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FSDataOutputStreamBuilder -->
+  <!-- start class org.apache.hadoop.fs.FSError -->
+  <class name="FSError" extends="java.lang.Error"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <doc>
+    <![CDATA[Thrown for unexpected filesystem errors, presumed to reflect disk errors
+ in the native filesystem.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FSError -->
+  <!-- start class org.apache.hadoop.fs.FSInputStream -->
+  <class name="FSInputStream" extends="java.io.InputStream"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.fs.Seekable"/>
+    <implements name="org.apache.hadoop.fs.PositionedReadable"/>
+    <constructor name="FSInputStream"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="seek"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pos" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location.  Can't
+ seek past the end of the file.]]>
+      </doc>
+    </method>
+    <method name="getPos" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the current offset from the start of the file]]>
+      </doc>
+    </method>
+    <method name="seekToNewSource" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="targetPos" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Seeks a different copy of the data.  Returns true if
+ a new source is found, false otherwise.]]>
+      </doc>
+    </method>
+    <method name="read" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buffer" type="byte[]"/>
+      <param name="offset" type="int"/>
+      <param name="length" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="validatePositionedReadArgs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buffer" type="byte[]"/>
+      <param name="offset" type="int"/>
+      <param name="length" type="int"/>
+      <exception name="EOFException" type="java.io.EOFException"/>
+      <doc>
+      <![CDATA[Validation code, available for use in subclasses.
+ @param position position: if negative an EOF exception is raised
+ @param buffer destination buffer
+ @param offset offset within the buffer
+ @param length length of bytes to read
+ @throws EOFException if the position is negative
+ @throws IndexOutOfBoundsException if there isn't space for the amount of
+ data requested.
+ @throws IllegalArgumentException other arguments are invalid.]]>
+      </doc>
+    </method>
+    <method name="readFully"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buffer" type="byte[]"/>
+      <param name="offset" type="int"/>
+      <param name="length" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFully"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buffer" type="byte[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[toString method returns the superclass toString, but if the subclass
+ implements {@link IOStatisticsSource} then those statistics are
+ extracted and included in the output.
+ That is: statistics of subclasses are automatically reported.
+ @return a string value.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[FSInputStream is a generic old InputStream with a little bit
+ of RAF-style seek ability.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FSInputStream -->
+  <!-- start class org.apache.hadoop.fs.FsServerDefaults -->
+  <class name="FsServerDefaults" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="FsServerDefaults"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FsServerDefaults" type="long, int, int, short, int, boolean, long, org.apache.hadoop.util.DataChecksum.Type"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FsServerDefaults" type="long, int, int, short, int, boolean, long, org.apache.hadoop.util.DataChecksum.Type, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FsServerDefaults" type="long, int, int, short, int, boolean, long, org.apache.hadoop.util.DataChecksum.Type, java.lang.String, byte"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getBlockSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getBytesPerChecksum" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getWritePacketSize" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getReplication" return="short"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFileBufferSize" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getEncryptDataTransfer" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTrashInterval" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getChecksumType" return="org.apache.hadoop.util.DataChecksum.Type"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getKeyProviderUri" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDefaultStoragePolicyId" return="byte"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Provides server default configuration values to clients.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FsServerDefaults -->
+  <!-- start class org.apache.hadoop.fs.FsStatus -->
+  <class name="FsStatus" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="FsStatus" type="long, long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a FsStatus object, using the specified statistics]]>
+      </doc>
+    </constructor>
+    <method name="getCapacity" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the capacity in bytes of the file system]]>
+      </doc>
+    </method>
+    <method name="getUsed" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the number of bytes used on the file system]]>
+      </doc>
+    </method>
+    <method name="getRemaining" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the number of remaining bytes on the file system]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[This class is used to represent the capacity, free and used space on a
+ {@link FileSystem}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.FsStatus -->
+  <!-- start interface org.apache.hadoop.fs.FutureDataInputStreamBuilder -->
+  <interface name="FutureDataInputStreamBuilder"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.fs.FSBuilder"/>
+    <method name="build" return="java.util.concurrent.CompletableFuture"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="withFileStatus" return="org.apache.hadoop.fs.FutureDataInputStreamBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+      <doc>
+      <![CDATA[A FileStatus may be provided to the open request.
+ It is up to the implementation whether to use this or not.
+ @param status status.
+ @return the builder.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Builder for input streams and subclasses whose return value is
+ actually a completable future: this allows for better asynchronous
+ operation.
+
+ To be more generic, {@link #opt(String, int)} and {@link #must(String, int)}
+ variants provide implementation-agnostic way to customize the builder.
+ Each FS-specific builder implementation can interpret the FS-specific
+ options accordingly, for example:
+
+ If the option is not related to the file system, the option will be ignored.
+ If the option is must, but not supported by the file system, a
+ {@link IllegalArgumentException} will be thrown.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.FutureDataInputStreamBuilder -->
+  <!-- start class org.apache.hadoop.fs.GlobalStorageStatistics -->
+  <class name="GlobalStorageStatistics" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.fs.GlobalStorageStatistics[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.fs.GlobalStorageStatistics"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="get" return="org.apache.hadoop.fs.StorageStatistics"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the StorageStatistics object with the given name.
+
+ @param name        The storage statistics object name.
+ @return            The StorageStatistics object with the given name, or
+                      null if there is none.]]>
+      </doc>
+    </method>
+    <method name="put" return="org.apache.hadoop.fs.StorageStatistics"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="provider" type="org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider"/>
+      <doc>
+      <![CDATA[Create or return the StorageStatistics object with the given name.
+
+ @param name        The storage statistics object name.
+ @param provider    An object which can create a new StorageStatistics
+                      object if needed.
+ @return            The StorageStatistics object with the given name.
+ @throws RuntimeException  If the StorageStatisticsProvider provides a null
+                           object or a new StorageStatistics object with the
+                           wrong name.]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reset all global storage statistics.]]>
+      </doc>
+    </method>
+    <method name="iterator" return="java.util.Iterator"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get an iterator that we can use to iterate through all the global storage
+ statistics objects.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Stores global storage statistics objects.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.GlobalStorageStatistics -->
+  <!-- start class org.apache.hadoop.fs.GlobFilter -->
+  <class name="GlobFilter" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.fs.PathFilter"/>
+    <constructor name="GlobFilter" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Creates a glob filter with the specified file pattern.
+
+ @param filePattern the file pattern.
+ @throws IOException thrown if the file pattern is incorrect.]]>
+      </doc>
+    </constructor>
+    <constructor name="GlobFilter" type="java.lang.String, org.apache.hadoop.fs.PathFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Creates a glob filter with the specified file pattern and a user filter.
+
+ @param filePattern the file pattern.
+ @param filter user filter in addition to the glob pattern.
+ @throws IOException thrown if the file pattern is incorrect.]]>
+      </doc>
+    </constructor>
+    <method name="hasPattern" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="accept" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <doc>
+    <![CDATA[A filter for POSIX glob pattern with brace expansions.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.GlobFilter -->
+  <!-- start class org.apache.hadoop.fs.InvalidPathException -->
+  <class name="InvalidPathException" extends="org.apache.hadoop.HadoopIllegalArgumentException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InvalidPathException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs exception with the specified detail message.
+ 
+ @param path invalid path.]]>
+      </doc>
+    </constructor>
+    <constructor name="InvalidPathException" type="java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs exception with the specified detail message.
+ 
+ @param path invalid path.
+ @param reason Reason <code>path</code> is invalid]]>
+      </doc>
+    </constructor>
+    <doc>
+    <![CDATA[Path string is invalid either because it has invalid characters or due to
+ other file system specific reasons.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.InvalidPathException -->
+  <!-- start class org.apache.hadoop.fs.InvalidPathHandleException -->
+  <class name="InvalidPathHandleException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InvalidPathHandleException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="InvalidPathHandleException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Thrown when the constraints encoded in a {@link PathHandle} do not hold.
+ For example, if a handle were created with the default
+ {@link Options.HandleOpt#path()} constraints, a call to
+ {@link FileSystem#open(PathHandle)} would succeed if the file were
+ modified, but if a different file was at that location then it would throw
+ this exception.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.InvalidPathHandleException -->
+  <!-- start class org.apache.hadoop.fs.LocalFileSystem -->
+  <class name="LocalFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="LocalFileSystem"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="LocalFileSystem" type="org.apache.hadoop.fs.FileSystem"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getScheme" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the protocol scheme for the FileSystem.
+ <p>
+
+ @return <code>file</code>]]>
+      </doc>
+    </method>
+    <method name="getRaw" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="pathToFile" return="java.io.File"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Convert a path to a File.]]>
+      </doc>
+    </method>
+    <method name="copyFromLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="copyToLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delSrc" type="boolean"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="reportChecksumFailure" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
+      <param name="inPos" type="long"/>
+      <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
+      <param name="sumsPos" type="long"/>
+      <doc>
+      <![CDATA[Moves files to a bad file directory on the same device, so that their
+ storage will not be reused.]]>
+      </doc>
+    </method>
+    <method name="supportsSymlinks" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createSymlink"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="target" type="org.apache.hadoop.fs.Path"/>
+      <param name="link" type="org.apache.hadoop.fs.Path"/>
+      <param name="createParent" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getFileLinkStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getLinkTarget" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Implement the FileSystem API for the checksummed local filesystem.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.LocalFileSystem -->
+  <!-- start class org.apache.hadoop.fs.LocatedFileStatus -->
+  <class name="LocatedFileStatus" extends="org.apache.hadoop.fs.FileStatus"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="LocatedFileStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="LocatedFileStatus" type="org.apache.hadoop.fs.FileStatus, org.apache.hadoop.fs.BlockLocation[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor 
+ @param stat a file status
+ @param locations a file's block locations]]>
+      </doc>
+    </constructor>
+    <constructor name="LocatedFileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path, org.apache.hadoop.fs.BlockLocation[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor
+ 
+ @param length a file's length
+ @param isdir if the path is a directory
+ @param block_replication the file's replication factor
+ @param blocksize a file's block size
+ @param modification_time a file's modification time
+ @param access_time a file's access time
+ @param permission a file's permission
+ @param owner a file's owner
+ @param group a file's group
+ @param symlink symlink if the path is a symbolic link
+ @param path the path's qualified name
+ @param locations a file's block locations]]>
+      </doc>
+    </constructor>
+    <constructor name="LocatedFileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path, boolean, boolean, boolean, org.apache.hadoop.fs.BlockLocation[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor.
+
+ @param length a file's length
+ @param isdir if the path is a directory
+ @param block_replication the file's replication factor
+ @param blocksize a file's block size
+ @param modification_time a file's modification time
+ @param access_time a file's access time
+ @param permission a file's permission
+ @param owner a file's owner
+ @param group a file's group
+ @param symlink symlink if the path is a symbolic link
+ @param path the path's qualified name
+ @param hasAcl entity has associated ACLs
+ @param isEncrypted entity is encrypted
+ @param isErasureCoded entity is erasure coded
+ @param locations a file's block locations]]>
+      </doc>
+    </constructor>
+    <constructor name="LocatedFileStatus" type="long, boolean, int, long, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path, java.util.Set, org.apache.hadoop.fs.BlockLocation[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor.
+
+ @param length a file's length
+ @param isdir if the path is a directory
+ @param block_replication the file's replication factor
+ @param blocksize a file's block size
+ @param modification_time a file's modification time
+ @param access_time a file's access time
+ @param permission a file's permission
+ @param owner a file's owner
+ @param group a file's group
+ @param symlink symlink if the path is a symbolic link
+ @param path the path's qualified name
+ @param attr Attribute flags (See {@link FileStatus.AttrFlags}).
+ @param locations a file's block locations]]>
+      </doc>
+    </constructor>
+    <method name="getBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the file's block locations
+
+ In HDFS, the returned BlockLocation will have different formats for
+ replicated and erasure coded file.
+ Please refer to
+ {@link FileSystem#getFileBlockLocations(FileStatus, long, long)}
+ for more details.
+
+ @return the file's block locations]]>
+      </doc>
+    </method>
+    <method name="setBlockLocations"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="locations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+      <doc>
+      <![CDATA[Hook for subclasses to lazily set block locations. The {@link #locations}
+ field should be null before this is called.
+ @param locations Block locations for this instance.]]>
+      </doc>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.fs.FileStatus"/>
+      <doc>
+      <![CDATA[Compare this FileStatus to another FileStatus
+ @param   o the FileStatus to be compared.
+ @return  a negative integer, zero, or a positive integer as this object
+   is less than, equal to, or greater than the specified object.]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Compare if this object is equal to another object
+ @param   o the object to be compared.
+ @return  true if two file status has the same path name; false if not.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns a hash code value for the object, which is defined as
+ the hash code of the path name.
+
+ @return  a hash code value for the path name.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class defines a FileStatus that includes a file's block locations.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.LocatedFileStatus -->
+  <!-- start interface org.apache.hadoop.fs.MultipartUploader -->
+  <interface name="MultipartUploader"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Closeable"/>
+    <implements name="org.apache.hadoop.fs.statistics.IOStatisticsSource"/>
+    <method name="startUpload" return="java.util.concurrent.CompletableFuture"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filePath" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Initialize a multipart upload.
+ @param filePath Target path for upload.
+ @return unique identifier associating part uploads.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="putPart" return="java.util.concurrent.CompletableFuture"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uploadId" type="org.apache.hadoop.fs.UploadHandle"/>
+      <param name="partNumber" type="int"/>
+      <param name="filePath" type="org.apache.hadoop.fs.Path"/>
+      <param name="inputStream" type="java.io.InputStream"/>
+      <param name="lengthInBytes" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Put part as part of a multipart upload.
+ It is possible to have parts uploaded in any order (or in parallel).
+ @param uploadId Identifier from {@link #startUpload(Path)}.
+ @param partNumber Index of the part relative to others.
+ @param filePath Target path for upload (as {@link #startUpload(Path)}).
+ @param inputStream Data for this part. Implementations MUST close this
+ stream after reading in the data.
+ @param lengthInBytes Target length to read from the stream.
+ @return unique PartHandle identifier for the uploaded part.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="complete" return="java.util.concurrent.CompletableFuture"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uploadId" type="org.apache.hadoop.fs.UploadHandle"/>
+      <param name="filePath" type="org.apache.hadoop.fs.Path"/>
+      <param name="handles" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Complete a multipart upload.
+ @param uploadId Identifier from {@link #startUpload(Path)}.
+ @param filePath Target path for upload (as {@link #startUpload(Path)}.
+ @param handles non-empty map of part number to part handle.
+          from {@link #putPart(UploadHandle, int, Path, InputStream, long)}.
+ @return unique PathHandle identifier for the uploaded file.
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <method name="abort" return="java.util.concurrent.CompletableFuture"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uploadId" type="org.apache.hadoop.fs.UploadHandle"/>
+      <param name="filePath" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Aborts a multipart upload.
+ @param uploadId Identifier from {@link #startUpload(Path)}.
+ @param filePath Target path for upload (same as {@link #startUpload(Path)}.
+ @throws IOException IO failure
+ @return a future; the operation will have completed]]>
+      </doc>
+    </method>
+    <method name="abortUploadsUnderPath" return="java.util.concurrent.CompletableFuture"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Best effort attempt to aborts multipart uploads under a path.
+ Not all implementations support this, and those which do may
+ be vulnerable to eventually consistent listings of current uploads
+ -some may be missed.
+ @param path path to abort uploads under.
+ @return a future to the number of entries aborted;
+ -1 if aborting is unsupported
+ @throws IOException IO failure]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[MultipartUploader is an interface for copying files multipart and across
+ multiple nodes.
+ <p></p>
+ The interface extends {@link IOStatisticsSource} so that there is no
+ need to cast an instance to see if is a source of statistics.
+ However, implementations MAY return null for their actual statistics.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.MultipartUploader -->
+  <!-- start class org.apache.hadoop.fs.Options -->
+  <class name="Options" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Options"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This class contains options related to file system operations.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.Options -->
+  <!-- start class org.apache.hadoop.fs.ParentNotDirectoryException -->
+  <class name="ParentNotDirectoryException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ParentNotDirectoryException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ParentNotDirectoryException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Indicates that the parent of specified Path is not a directory
+ as expected.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.ParentNotDirectoryException -->
+  <!-- start interface org.apache.hadoop.fs.PartHandle -->
+  <interface name="PartHandle"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Serializable"/>
+    <method name="toByteArray" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Serialized form in bytes.]]>
+      </doc>
+    </method>
+    <method name="bytes" return="java.nio.ByteBuffer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <doc>
+    <![CDATA[Opaque, serializable reference to a part id for multipart uploads.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.PartHandle -->
+  <!-- start class org.apache.hadoop.fs.PartialListing -->
+  <class name="PartialListing" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="PartialListing" type="org.apache.hadoop.fs.Path, java.util.List"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="PartialListing" type="org.apache.hadoop.fs.Path, org.apache.hadoop.ipc.RemoteException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="get" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Partial listing of the path being listed. In the case where the path is
+ a file, the list will be a singleton with the file itself.
+
+ @return Partial listing of the path being listed.
+ @throws IOException if there was an exception getting the listing.]]>
+      </doc>
+    </method>
+    <method name="getListedPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Path being listed.
+
+ @return the path being listed.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A partial listing of the children of a parent directory. Since it is a
+ partial listing, multiple PartialListing may need to be combined to obtain
+ the full listing of a parent directory.
+ <p/>
+ ListingBatch behaves similar to a Future, in that getting the result via
+ {@link #get()} will throw an Exception if there was a failure.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.PartialListing -->
+  <!-- start class org.apache.hadoop.fs.Path -->
+  <class name="Path" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <implements name="java.io.Serializable"/>
+    <implements name="java.io.ObjectInputValidation"/>
+    <constructor name="Path" type="java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new Path based on the child path resolved against the parent path.
+
+ @param parent the parent path
+ @param child the child path]]>
+      </doc>
+    </constructor>
+    <constructor name="Path" type="org.apache.hadoop.fs.Path, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new Path based on the child path resolved against the parent path.
+
+ @param parent the parent path
+ @param child the child path]]>
+      </doc>
+    </constructor>
+    <constructor name="Path" type="java.lang.String, org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new Path based on the child path resolved against the parent path.
+
+ @param parent the parent path
+ @param child the child path]]>
+      </doc>
+    </constructor>
+    <constructor name="Path" type="org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new Path based on the child path resolved against the parent path.
+
+ @param parent the parent path
+ @param child the child path]]>
+      </doc>
+    </constructor>
+    <constructor name="Path" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <doc>
+      <![CDATA[Construct a path from a String.  Path strings are URIs, but with
+ unescaped elements and some additional normalization.
+
+ @param pathString the path string]]>
+      </doc>
+    </constructor>
+    <constructor name="Path" type="java.net.URI"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a path from a URI
+
+ @param aUri the source URI]]>
+      </doc>
+    </constructor>
+    <constructor name="Path" type="java.lang.String, java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a Path from components.
+
+ @param scheme the scheme
+ @param authority the authority
+ @param path the path]]>
+      </doc>
+    </constructor>
+    <method name="getPathWithoutSchemeAndAuthority" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Return a version of the given Path without the scheme information.
+
+ @param path the source Path
+ @return a copy of this Path without the scheme information]]>
+      </doc>
+    </method>
+    <method name="mergePaths" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path1" type="org.apache.hadoop.fs.Path"/>
+      <param name="path2" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Merge 2 paths such that the second path is appended relative to the first.
+ The returned path has the scheme and authority of the first path.  On
+ Windows, the drive specification in the second path is discarded.
+ 
+ @param path1 the first path
+ @param path2 the second path, to be appended relative to path1
+ @return the merged path]]>
+      </doc>
+    </method>
+    <method name="isWindowsAbsolutePath" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathString" type="java.lang.String"/>
+      <param name="slashed" type="boolean"/>
+      <doc>
+      <![CDATA[Determine whether a given path string represents an absolute path on
+ Windows. e.g. "C:/a/b" is an absolute path. "C:a/b" is not.
+
+ @param pathString the path string to evaluate
+ @param slashed true if the given path is prefixed with "/"
+ @return true if the supplied path looks like an absolute path with a Windows
+ drive-specifier]]>
+      </doc>
+    </method>
+    <method name="toUri" return="java.net.URI"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Convert this Path to a URI.
+
+ @return this Path as a URI]]>
+      </doc>
+    </method>
+    <method name="getFileSystem" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the FileSystem that owns this Path.
+
+ @param conf the configuration to use when resolving the FileSystem
+ @return the FileSystem that owns this Path
+ @throws java.io.IOException thrown if there's an issue resolving the
+ FileSystem]]>
+      </doc>
+    </method>
+    <method name="isAbsoluteAndSchemeAuthorityNull" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns true if the path component (i.e. directory) of this URI is
+ absolute <strong>and</strong> the scheme is null, <b>and</b> the authority
+ is null.
+
+ @return whether the path is absolute and the URI has no scheme nor
+ authority parts]]>
+      </doc>
+    </method>
+    <method name="isUriPathAbsolute" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns true if the path component (i.e. directory) of this URI is
+ absolute.
+
+ @return whether this URI's path is absolute]]>
+      </doc>
+    </method>
+    <method name="isAbsolute" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns true if the path component (i.e. directory) of this URI is
+ absolute.  This method is a wrapper for {@link #isUriPathAbsolute()}.
+
+ @return whether this URI's path is absolute]]>
+      </doc>
+    </method>
+    <method name="isRoot" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns true if and only if this path represents the root of a file system.
+
+ @return true if and only if this path represents the root of a file system]]>
+      </doc>
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the final component of this path.
+
+ @return the final component of this path]]>
+      </doc>
+    </method>
+    <method name="getParent" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the parent of a path or null if at root.
+ @return the parent of a path or null if at root]]>
+      </doc>
+    </method>
+    <method name="suffix" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="suffix" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Adds a suffix to the final name in the path.
+
+ @param suffix the suffix to add
+ @return a new path with the suffix added]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="depth" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the number of elements in this path.
+ @return the number of elements in this path]]>
+      </doc>
+    </method>
+    <method name="makeQualified" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #makeQualified(URI, Path)}">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <doc>
+      <![CDATA[Returns a qualified path object for the {@link FileSystem}'s working
+ directory.
+  
+ @param fs the target FileSystem
+ @return a qualified path object for the FileSystem's working directory
+ @deprecated use {@link #makeQualified(URI, Path)}]]>
+      </doc>
+    </method>
+    <method name="validateObject"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="InvalidObjectException" type="java.io.InvalidObjectException"/>
+      <doc>
+      <![CDATA[Validate the contents of a deserialized Path, so as
+ to defend against malicious object streams.
+ @throws InvalidObjectException if there's no URI]]>
+      </doc>
+    </method>
+    <field name="SEPARATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The directory separator, a slash.]]>
+      </doc>
+    </field>
+    <field name="SEPARATOR_CHAR" type="char"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The directory separator, a slash, as a character.]]>
+      </doc>
+    </field>
+    <field name="CUR_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The current directory, ".".]]>
+      </doc>
+    </field>
+    <field name="WINDOWS" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether the current host is a Windows machine.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Names a file or directory in a {@link FileSystem}.
+ Path strings use slash as the directory separator.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.Path -->
+  <!-- start interface org.apache.hadoop.fs.PathFilter -->
+  <interface name="PathFilter"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="accept" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Tests whether or not the specified abstract pathname should be
+ included in a pathname list.
+
+ @param  path  The abstract pathname to be tested
+ @return  <code>true</code> if and only if <code>pathname</code>
+          should be included]]>
+      </doc>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.PathFilter -->
+  <!-- start interface org.apache.hadoop.fs.PathHandle -->
+  <interface name="PathHandle"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Serializable"/>
+    <method name="toByteArray" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Serialized form in bytes.]]>
+      </doc>
+    </method>
+    <method name="bytes" return="java.nio.ByteBuffer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the bytes of this path handle.
+ @return the bytes to get to the process completing the upload.]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <doc>
+    <![CDATA[Opaque, serializable reference to an entity in the FileSystem. May contain
+ metadata sufficient to resolve or verify subsequent accesses independent of
+ other modifications to the FileSystem.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.PathHandle -->
+  <!-- start interface org.apache.hadoop.fs.PositionedReadable -->
+  <interface name="PositionedReadable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="read" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buffer" type="byte[]"/>
+      <param name="offset" type="int"/>
+      <param name="length" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read up to the specified number of bytes, from a given
+ position within a file, and return the number of bytes read. This does not
+ change the current offset of a file, and is thread-safe.
+
+ <i>Warning: Not all filesystems satisfy the thread-safety requirement.</i>
+ @param position position within file
+ @param buffer destination buffer
+ @param offset offset in the buffer
+ @param length number of bytes to read
+ @return actual number of bytes read; -1 means "none"
+ @throws IOException IO problems.]]>
+      </doc>
+    </method>
+    <method name="readFully"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buffer" type="byte[]"/>
+      <param name="offset" type="int"/>
+      <param name="length" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read the specified number of bytes, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.
+
+ <i>Warning: Not all filesystems satisfy the thread-safety requirement.</i>
+ @param position position within file
+ @param buffer destination buffer
+ @param offset offset in the buffer
+ @param length number of bytes to read
+ @throws IOException IO problems.
+ @throws EOFException the end of the data was reached before
+ the read operation completed]]>
+      </doc>
+    </method>
+    <method name="readFully"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="long"/>
+      <param name="buffer" type="byte[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read number of bytes equal to the length of the buffer, from a given
+ position within a file. This does not
+ change the current offset of a file, and is thread-safe.
+
+ <i>Warning: Not all filesystems satisfy the thread-safety requirement.</i>
+ @param position position within file
+ @param buffer destination buffer
+ @throws IOException IO problems.
+ @throws EOFException the end of the data was reached before
+ the read operation completed]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Stream that permits positional reading.
+
+ Implementations are required to implement thread-safe operations; this may
+ be supported by concurrent access to the data, or by using a synchronization
+ mechanism to serialize access.
+
+ Not all implementations meet this requirement. Those that do not cannot
+ be used as a backing store for some applications, such as Apache HBase.
+
+ Independent of whether or not they are thread safe, some implementations
+ may make the intermediate state of the system, specifically the position
+ obtained in {@code Seekable.getPos()} visible.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.PositionedReadable -->
+  <!-- start class org.apache.hadoop.fs.QuotaUsage -->
+  <class name="QuotaUsage" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="QuotaUsage"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="QuotaUsage" type="org.apache.hadoop.fs.QuotaUsage.Builder"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Build the instance based on the builder.]]>
+      </doc>
+    </constructor>
+    <method name="setQuota"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="quota" type="long"/>
+    </method>
+    <method name="setSpaceConsumed"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="spaceConsumed" type="long"/>
+    </method>
+    <method name="setSpaceQuota"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="spaceQuota" type="long"/>
+    </method>
+    <method name="getFileAndDirectoryCount" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the total count of files and directories.]]>
+      </doc>
+    </method>
+    <method name="getQuota" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the directory quota.]]>
+      </doc>
+    </method>
+    <method name="getSpaceConsumed" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return (disk) space consumed.]]>
+      </doc>
+    </method>
+    <method name="getSpaceQuota" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return (disk) space quota.]]>
+      </doc>
+    </method>
+    <method name="getTypeQuota" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="org.apache.hadoop.fs.StorageType"/>
+      <doc>
+      <![CDATA[Return storage type quota.]]>
+      </doc>
+    </method>
+    <method name="getTypeConsumed" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="org.apache.hadoop.fs.StorageType"/>
+      <doc>
+      <![CDATA[Return storage type consumed.]]>
+      </doc>
+    </method>
+    <method name="isTypeQuotaSet" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return true if any storage type quota has been set.]]>
+      </doc>
+    </method>
+    <method name="isTypeConsumedAvailable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return true if any storage type consumption information is available.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="getHeader" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the header of the output.
+ @return the header of the output]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hOption" type="boolean"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hOption" type="boolean"/>
+      <param name="tOption" type="boolean"/>
+      <param name="types" type="java.util.List"/>
+      <doc>
+      <![CDATA[Return the string representation of the object in the output format.
+ If hOption is false file sizes are returned in bytes;
+ if hOption is true file sizes are returned in a human-readable format.
+
+ @param hOption a flag indicating if human readable output is to be used
+ @return the string representation of the object]]>
+      </doc>
+    </method>
+    <method name="getQuotaUsage" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="hOption" type="boolean"/>
+    </method>
+    <method name="getTypesQuotaUsage" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="hOption" type="boolean"/>
+      <param name="types" type="java.util.List"/>
+    </method>
+    <method name="getStorageTypeHeader" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="storageTypes" type="java.util.List"/>
+      <doc>
+      <![CDATA[Return the header with the given storage types.
+
+ @param storageTypes the storage types to include in the header
+ @return storage header string]]>
+      </doc>
+    </method>
+    <field name="QUOTA_STRING_FORMAT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Output format:
+ |----12----| |----15----| |----15----| |----15----| |-------18-------|
+    QUOTA   REMAINING_QUOTA SPACE_QUOTA SPACE_QUOTA_REM FILE_NAME]]>
+      </doc>
+    </field>
+    <field name="SPACE_QUOTA_STRING_FORMAT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="QUOTA_HEADER_FIELDS" type="java.lang.String[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="QUOTA_HEADER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Store the quota usage of a directory.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.QuotaUsage -->
+  <!-- start class org.apache.hadoop.fs.RawLocalFileSystem -->
+  <class name="RawLocalFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RawLocalFileSystem"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="useStatIfAvailable"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="pathToFile" return="java.io.File"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Convert a path to a File.]]>
+      </doc>
+    </method>
+    <method name="getUri" return="java.net.URI"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fd" type="org.apache.hadoop.fs.PathHandle"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createOutputStream" return="java.io.OutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="append" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createOutputStreamWithMode" return="java.io.OutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="append" type="boolean"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createNonRecursive" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="flags" type="java.util.EnumSet"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createNonRecursive" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="concat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="trg" type="org.apache.hadoop.fs.Path"/>
+      <param name="psrcs" type="org.apache.hadoop.fs.Path[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="rename" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="handleEmptyDstDirectoryOnWindows" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="srcFile" type="java.io.File"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <param name="dstFile" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="truncate" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="newLength" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="delete" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="recursive" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete the given path to a file or directory.
+ @param p the path to delete
+ @param recursive to delete sub-directories
+ @return true if the file or directory and all its contents were deleted
+ @throws IOException if p is non-empty and recursive is false]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}
+
+ (<b>Note</b>: Returned list is not sorted in any given order,
+ due to reliance on Java's {@link File#list()} API.)]]>
+      </doc>
+    </method>
+    <method name="exists" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="mkOneDir" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="p2f" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="mkOneDirWithMode" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="p2f" type="java.io.File"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="mkdirs" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Creates the specified directory hierarchy. Does not
+ treat existence as an error.]]>
+      </doc>
+    </method>
+    <method name="mkdirs" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setWorkingDirectory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Set the working directory to the given directory.]]>
+      </doc>
+    </method>
+    <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getInitialWorkingDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStatus" return="org.apache.hadoop.fs.FsStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="moveFromLocalFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="startLocalOutput" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/>
+      <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="completeLocalOutput"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fsWorkingFile" type="org.apache.hadoop.fs.Path"/>
+      <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setOwner"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="username" type="java.lang.String"/>
+      <param name="groupname" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Use the command chown to set owner.]]>
+      </doc>
+    </method>
+    <method name="setPermission"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Use the command chmod to set permission.]]>
+      </doc>
+    </method>
+    <method name="setTimes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="mtime" type="long"/>
+      <param name="atime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Sets the {@link Path}'s last modified time and last access time to
+ the given valid times.
+
+ @param mtime the modification time to set (only if no less than zero).
+ @param atime the access time to set (only if no less than zero).
+ @throws IOException if setting the times fails.]]>
+      </doc>
+    </method>
+    <method name="createPathHandle" return="org.apache.hadoop.fs.PathHandle"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="opts" type="org.apache.hadoop.fs.Options.HandleOpt[]"/>
+      <doc>
+      <![CDATA[Hook to implement support for {@link PathHandle} operations.
+ @param stat Referent in the target FileSystem
+ @param opts Constraints that determine the validity of the
+            {@link PathHandle} reference.]]>
+      </doc>
+    </method>
+    <method name="supportsSymlinks" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createSymlink"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="target" type="org.apache.hadoop.fs.Path"/>
+      <param name="link" type="org.apache.hadoop.fs.Path"/>
+      <param name="createParent" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getFileLinkStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a FileStatus representing the given path. If the path refers
+ to a symlink return a FileStatus representing the link rather than
+ the object the link refers to.]]>
+      </doc>
+    </method>
+    <method name="getLinkTarget" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="hasPathCapability" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="capability" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Implement the FileSystem API for the raw local filesystem.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.RawLocalFileSystem -->
+  <!-- start class org.apache.hadoop.fs.ReadOption -->
+  <class name="ReadOption" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.fs.ReadOption[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.fs.ReadOption"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Options that can be used when reading from a FileSystem.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.ReadOption -->
+  <!-- start interface org.apache.hadoop.fs.Seekable -->
+  <interface name="Seekable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="seek"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pos" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Seek to the given offset from the start of the file.
+ The next read() will be from that location.  Can't
+ seek past the end of the file.]]>
+      </doc>
+    </method>
+    <method name="getPos" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the current offset from the start of the file]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Stream that permits seeking.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.Seekable -->
+  <!-- start class org.apache.hadoop.fs.StorageStatistics -->
+  <class name="StorageStatistics" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StorageStatistics" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the name of this StorageStatistics object.]]>
+      </doc>
+    </method>
+    <method name="getScheme" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the associated file system scheme if this is scheme specific,
+ else return null.]]>
+      </doc>
+    </method>
+    <method name="getLongStatistics" return="java.util.Iterator"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get an iterator over all the currently tracked long statistics.
+
+ The values returned will depend on the type of FileSystem or FileContext
+ object.  The values do not necessarily reflect a snapshot in time.]]>
+      </doc>
+    </method>
+    <method name="getLong" return="java.lang.Long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value of a statistic.
+
+ @return         null if the statistic is not being tracked or is not a
+                 long statistic. The value of the statistic, otherwise.]]>
+      </doc>
+    </method>
+    <method name="isTracked" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Return true if a statistic is being tracked.
+
+ @return         True only if the statistic is being tracked.]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reset all the statistic data.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[StorageStatistics contains statistics data for a FileSystem or FileContext
+ instance.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.StorageStatistics -->
+  <!-- start class org.apache.hadoop.fs.StorageType -->
+  <class name="StorageType" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.fs.StorageType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.fs.StorageType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="isTransient" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="supportTypeQuota" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isMovable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="asList" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getMovableTypes" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTypesSupportingQuota" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="parseStorageType" return="org.apache.hadoop.fs.StorageType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="int"/>
+    </method>
+    <method name="parseStorageType" return="org.apache.hadoop.fs.StorageType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="s" type="java.lang.String"/>
+    </method>
+    <field name="DEFAULT" type="org.apache.hadoop.fs.StorageType"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="EMPTY_ARRAY" type="org.apache.hadoop.fs.StorageType[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Defines the types of supported storage media. The default storage
+ medium is assumed to be DISK.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.StorageType -->
+  <!-- start interface org.apache.hadoop.fs.StreamCapabilities -->
+  <interface name="StreamCapabilities"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="hasCapability" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="capability" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Query the stream for a specific capability.
+
+ @param capability string to query the stream support for.
+ @return True if the stream supports capability.]]>
+      </doc>
+    </method>
+    <field name="HFLUSH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Stream hflush capability implemented by {@link Syncable#hflush()}.
+
+ Use the {@link #HSYNC} probe to check for the support of Syncable;
+ it's that presence of {@code hsync()} which matters.]]>
+      </doc>
+    </field>
+    <field name="HSYNC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Stream hsync capability implemented by {@link Syncable#hsync()}.]]>
+      </doc>
+    </field>
+    <field name="READAHEAD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Stream setReadahead capability implemented by
+ {@link CanSetReadahead#setReadahead(Long)}.]]>
+      </doc>
+    </field>
+    <field name="DROPBEHIND" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Stream setDropBehind capability implemented by
+ {@link CanSetDropBehind#setDropBehind(Boolean)}.]]>
+      </doc>
+    </field>
+    <field name="UNBUFFER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Stream unbuffer capability implemented by {@link CanUnbuffer#unbuffer()}.]]>
+      </doc>
+    </field>
+    <field name="READBYTEBUFFER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Stream read(ByteBuffer) capability implemented by
+ {@link ByteBufferReadable#read(java.nio.ByteBuffer)}.]]>
+      </doc>
+    </field>
+    <field name="PREADBYTEBUFFER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Stream read(long, ByteBuffer) capability implemented by
+ {@link ByteBufferPositionedReadable#read(long, java.nio.ByteBuffer)}.]]>
+      </doc>
+    </field>
+    <field name="IOSTATISTICS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[IOStatisticsSource API.]]>
+      </doc>
+    </field>
+    <field name="ABORTABLE_STREAM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Stream abort() capability implemented by {@link Abortable#abort()}.
+ This matches the Path Capability
+ {@link CommonPathCapabilities#ABORTABLE_STREAM}.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Interface to query streams for supported capabilities.
+
+ Capability strings must be in lower case.
+
+ Constant strings are chosen over enums in order to allow other file systems
+ to define their own capabilities.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.StreamCapabilities -->
+  <!-- start class org.apache.hadoop.fs.StreamCapabilitiesPolicy -->
+  <class name="StreamCapabilitiesPolicy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StreamCapabilitiesPolicy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="unbuffer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <doc>
+      <![CDATA[Implement the policy for {@link CanUnbuffer#unbuffer()}.
+
+ @param in the input stream]]>
+      </doc>
+    </method>
+    <field name="CAN_UNBUFFER_NOT_IMPLEMENTED_MESSAGE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Static methods to implement policies for {@link StreamCapabilities}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.StreamCapabilitiesPolicy -->
+  <!-- start interface org.apache.hadoop.fs.Syncable -->
+  <interface name="Syncable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="hflush"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Flush out the data in client's user buffer. After the return of
+ this call, new readers will see the data.
+ @throws IOException if any error occurs]]>
+      </doc>
+    </method>
+    <method name="hsync"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Similar to posix fsync, flush out the data in client's user buffer 
+ all the way to the disk device (but the disk may have it in its cache).
+ @throws IOException if error occurs]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the interface for flush/sync operations.
+ Consult the Hadoop filesystem specification for the definition of the
+ semantics of these operations.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.Syncable -->
+  <!-- start class org.apache.hadoop.fs.Trash -->
+  <class name="Trash" extends="org.apache.hadoop.conf.Configured"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Trash" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct a trash can accessor.
+ @param conf a Configuration]]>
+      </doc>
+    </constructor>
+    <constructor name="Trash" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct a trash can accessor for the FileSystem provided.
+ @param fs the FileSystem
+ @param conf a Configuration]]>
+      </doc>
+    </constructor>
+    <method name="moveToAppropriateTrash" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[In case of the symlinks or mount points, one has to move the appropriate
+ trashbin in the actual volume of the path p being deleted.
+
+ Hence we get the file system of the fully-qualified resolved-path and
+ then move the path p to the trashbin in that volume.
+ @param fs - the filesystem of path p
+ @param p - the path being deleted - to be moved to trash
+ @param conf - configuration
+ @return false if the item is already in the trash or trash is disabled
+ @throws IOException on error]]>
+      </doc>
+    </method>
+    <method name="isEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns whether the trash is enabled for this filesystem]]>
+      </doc>
+    </method>
+    <method name="moveToTrash" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Move a file or directory to the current trash directory.
+ @return false if the item is already in the trash or trash is disabled]]>
+      </doc>
+    </method>
+    <method name="checkpoint"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a trash checkpoint.]]>
+      </doc>
+    </method>
+    <method name="expunge"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete old checkpoint(s).]]>
+      </doc>
+    </method>
+    <method name="expungeImmediately"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete all trash immediately.]]>
+      </doc>
+    </method>
+    <method name="getEmptier" return="java.lang.Runnable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a {@link Runnable} that periodically empties the trash of all
+ users, intended to be run by the superuser.]]>
+      </doc>
+    </method>
+    <method name="getCurrentTrashDir" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Provides a trash facility which supports pluggable Trash policies. 
+
+ See the implementation of the configured TrashPolicy for more
+ details.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.Trash -->
+  <!-- start class org.apache.hadoop.fs.TrashPolicy -->
+  <class name="TrashPolicy" extends="org.apache.hadoop.conf.Configured"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TrashPolicy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="initialize"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #initialize(Configuration, FileSystem)} instead.">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="home" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Used to setup the trash policy. Must be implemented by all TrashPolicy
+ implementations.
+ @param conf the configuration to be used
+ @param fs the filesystem to be used
+ @param home the home directory
+ @deprecated Use {@link #initialize(Configuration, FileSystem)} instead.]]>
+      </doc>
+    </method>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <doc>
+      <![CDATA[Used to setup the trash policy. Must be implemented by all TrashPolicy
+ implementations. Different from initialize(conf, fs, home), this one does
+ not assume trash always under /user/$USER due to HDFS encryption zone.
+ @param conf the configuration to be used
+ @param fs the filesystem to be used]]>
+      </doc>
+    </method>
+    <method name="isEnabled" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns whether the Trash Policy is enabled for this filesystem.]]>
+      </doc>
+    </method>
+    <method name="moveToTrash" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Move a file or directory to the current trash directory.
+ @return false if the item is already in the trash or trash is disabled]]>
+      </doc>
+    </method>
+    <method name="createCheckpoint"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a trash checkpoint.]]>
+      </doc>
+    </method>
+    <method name="deleteCheckpoint"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete old trash checkpoint(s).]]>
+      </doc>
+    </method>
+    <method name="deleteCheckpointsImmediately"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete all checkpoints immediately, ie empty trash.]]>
+      </doc>
+    </method>
+    <method name="getCurrentTrashDir" return="org.apache.hadoop.fs.Path"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current working directory of the Trash Policy
+ This API does not work with files deleted from encryption zone when HDFS
+ data encryption at rest feature is enabled as rename file between
+ encryption zones or encryption zone and non-encryption zone is not allowed.
+
+ The caller is recommended to use the new API
+ TrashPolicy#getCurrentTrashDir(Path path).
+ It returns the trash location correctly for the path specified no matter
+ the path is in encryption zone or not.]]>
+      </doc>
+    </method>
+    <method name="getCurrentTrashDir" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the current trash directory for path specified based on the Trash
+ Policy
+ @param path path to be deleted
+ @return current trash directory for the path to be deleted
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getEmptier" return="java.lang.Runnable"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a {@link Runnable} that periodically empties the trash of all
+ users, intended to be run by the superuser.]]>
+      </doc>
+    </method>
+    <method name="getInstance" return="org.apache.hadoop.fs.TrashPolicy"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #getInstance(Configuration, FileSystem)} instead.">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="home" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Get an instance of the configured TrashPolicy based on the value
+ of the configuration parameter fs.trash.classname.
+
+ @param conf the configuration to be used
+ @param fs the file system to be used
+ @param home the home directory
+ @return an instance of TrashPolicy
+ @deprecated Use {@link #getInstance(Configuration, FileSystem)} instead.]]>
+      </doc>
+    </method>
+    <method name="getInstance" return="org.apache.hadoop.fs.TrashPolicy"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <doc>
+      <![CDATA[Get an instance of the configured TrashPolicy based on the value
+ of the configuration parameter fs.trash.classname.
+
+ @param conf the configuration to be used
+ @param fs the file system to be used
+ @return an instance of TrashPolicy]]>
+      </doc>
+    </method>
+    <field name="fs" type="org.apache.hadoop.fs.FileSystem"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="trash" type="org.apache.hadoop.fs.Path"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="deletionInterval" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This interface is used for implementing different Trash policies.
+ Provides factory method to create instances of the configured Trash policy.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.TrashPolicy -->
+  <!-- start class org.apache.hadoop.fs.UnsupportedFileSystemException -->
+  <class name="UnsupportedFileSystemException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UnsupportedFileSystemException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs exception with the specified detail message. 
+ @param message exception message.]]>
+      </doc>
+    </constructor>
+    <doc>
+    <![CDATA[File system for a given file system name/scheme is not supported]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.UnsupportedFileSystemException -->
+  <!-- start class org.apache.hadoop.fs.UnsupportedMultipartUploaderException -->
+  <class name="UnsupportedMultipartUploaderException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UnsupportedMultipartUploaderException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs exception with the specified detail message.
+
+ @param message exception message.]]>
+      </doc>
+    </constructor>
+    <doc>
+    <![CDATA[MultipartUploader for a given file system name/scheme is not supported.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.UnsupportedMultipartUploaderException -->
+  <!-- start interface org.apache.hadoop.fs.UploadHandle -->
+  <interface name="UploadHandle"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Serializable"/>
+    <method name="toByteArray" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Serialized form in bytes.]]>
+      </doc>
+    </method>
+    <method name="bytes" return="java.nio.ByteBuffer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <doc>
+    <![CDATA[Opaque, serializable reference to an uploadId for multipart uploads.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.UploadHandle -->
+  <!-- start class org.apache.hadoop.fs.XAttrCodec -->
+  <class name="XAttrCodec" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.fs.XAttrCodec[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.fs.XAttrCodec"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="decodeValue" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Decode string representation of a value and check whether it's 
+ encoded. If the given string begins with 0x or 0X, it expresses
+ a hexadecimal number. If the given string begins with 0s or 0S,
+ base64 encoding is expected. If the given string is enclosed in 
+ double quotes, the inner string is treated as text. Otherwise 
+ the given string is treated as text. 
+ @param value string representation of the value.
+ @return byte[] the value
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="encodeValue" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="byte[]"/>
+      <param name="encoding" type="org.apache.hadoop.fs.XAttrCodec"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Encode byte[] value to string representation with encoding. 
+ Values encoded as text strings are enclosed in double quotes (\"), 
+ while strings encoded as hexadecimal and base64 are prefixed with 
+ 0x and 0s, respectively.
+ @param value byte[] value
+ @param encoding
+ @return String string representation of value
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The value of <code>XAttr</code> is byte[], this class is to 
+ convert byte[] to some kind of string representation or convert back.
+ String representation is convenient for display and input. For example
+ display in screen as shell response and json response, input as http
+ or shell parameter.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.XAttrCodec -->
+  <!-- start class org.apache.hadoop.fs.XAttrSetFlag -->
+  <class name="XAttrSetFlag" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.fs.XAttrSetFlag[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.fs.XAttrSetFlag"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="validate"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="xAttrName" type="java.lang.String"/>
+      <param name="xAttrExists" type="boolean"/>
+      <param name="flag" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.fs.XAttrSetFlag -->
+  <doc>
+  <![CDATA[An abstract file system API.]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.fs.audit">
+  <!-- start class org.apache.hadoop.fs.audit.CommonAuditContext -->
+  <class name="CommonAuditContext" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="put" return="java.util.function.Supplier"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Put a context entry.
+ @param key key
+ @param value new value
+ @return old value or null]]>
+      </doc>
+    </method>
+    <method name="put" return="java.util.function.Supplier"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="java.util.function.Supplier"/>
+      <doc>
+      <![CDATA[Put a context entry dynamically evaluated on demand.
+ @param key key
+ @param value new value
+ @return old value or null]]>
+      </doc>
+    </method>
+    <method name="remove"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Remove a context entry.
+ @param key key]]>
+      </doc>
+    </method>
+    <method name="get" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get a context entry.
+ @param key key
+ @return value or null]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reset the context; will set the standard options again.
+ Primarily for testing.]]>
+      </doc>
+    </method>
+    <method name="containsKey" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Does the context contain a specific key?
+ @param key key
+ @return true if it is in the context.]]>
+      </doc>
+    </method>
+    <method name="currentAuditContext" return="org.apache.hadoop.fs.audit.CommonAuditContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current common audit context. Thread local.
+ @return the audit context of this thread.]]>
+      </doc>
+    </method>
+    <method name="currentThreadID" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A thread ID which is unique for this process and shared across all
+ S3A clients on the same thread, even those using different FS instances.
+ @return a thread ID for reporting.]]>
+      </doc>
+    </method>
+    <method name="getEvaluatedEntries" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the evaluated operations.
+ This is the map unique to this context.
+ @return the operations map.]]>
+      </doc>
+    </method>
+    <method name="setGlobalContextEntry"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set a global entry.
+ @param key key
+ @param value value]]>
+      </doc>
+    </method>
+    <method name="getGlobalContextEntry" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get a global entry.
+ @param key key
+ @return value or null]]>
+      </doc>
+    </method>
+    <method name="removeGlobalContextEntry"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Remove a global entry.
+ @param key key to clear.]]>
+      </doc>
+    </method>
+    <method name="noteEntryPoint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tool" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Add the entry point as a context entry with the key
+ {@link AuditConstants#PARAM_COMMAND}
+ if it has not already been recorded.
+ This is called via ToolRunner but may be used at any
+ other entry point.
+ @param tool object loaded/being launched.]]>
+      </doc>
+    </method>
+    <method name="getGlobalContextEntries" return="java.lang.Iterable"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get an iterator over the global entries.
+ Thread safe.
+ @return an iterable to enumerate the values.]]>
+      </doc>
+    </method>
+    <field name="PROCESS_ID" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Process ID; currently built from UUID and timestamp.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[The common audit context is a map of common context information
+ which can be used with any audit span.
+ This context is shared across all Filesystems within the
+ thread.
+ Audit spans will be created with a reference to the current
+ context of their thread;
+ That reference is retained even as they are moved across threads, so
+ context information (including the thread ID) is preserved.
+
+ The Global context entries are a set of key-value pairs which span
+ all threads; the {@code HttpReferrerAuditHeader} picks these
+ up automatically. It is intended for minimal use of
+ shared constant values (process ID, entry point).
+
+ An attribute set in {@link #setGlobalContextEntry(String, String)}
+ will be set across all audit spans in all threads.
+
+ The {@link #noteEntryPoint(Object)} method should be
+ used in entry points (ToolRunner.run, etc). It extracts
+ the final element of the classname and attaches that
+ to the global context with the attribute key
+ {@link AuditConstants#PARAM_COMMAND}, if not already
+ set.
+ This helps identify the application being executed.
+
+ All other values set are specific to this context, which
+ is thread local.
+ The attributes which can be added to this common context include
+ evaluator methods which will be evaluated in whichever thread
+ invokes {@link #getEvaluatedEntries()} and then evaluates them.
+ That map of evaluated options may be evaluated later, in a different
+ thread.
+
+ For setting and clearing thread-level options, use
+ {@link #currentAuditContext()} to get the thread-local
+ context for the caller, which can then be manipulated.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.audit.CommonAuditContext -->
+</package>
+<package name="org.apache.hadoop.fs.crypto">
+</package>
+<package name="org.apache.hadoop.fs.ftp">
+  <!-- start class org.apache.hadoop.fs.ftp.FTPException -->
+  <class name="FTPException" extends="java.lang.RuntimeException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FTPException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FTPException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FTPException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[A class to wrap a {@link Throwable} into a Runtime Exception.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.ftp.FTPException -->
+  <!-- start class org.apache.hadoop.fs.ftp.FTPFileSystem -->
+  <class name="FTPFileSystem" extends="org.apache.hadoop.fs.FileSystem"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FTPFileSystem"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getScheme" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the protocol scheme for the FileSystem.
+ <p>
+
+ @return <code>ftp</code>]]>
+      </doc>
+    </method>
+    <method name="getDefaultPort" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the default port for this FTPFileSystem.
+
+ @return the default port]]>
+      </doc>
+    </method>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <param name="overwrite" type="boolean"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[A stream obtained via this call must be closed before using other APIs of
+ this class or else the invocation will block.]]>
+      </doc>
+    </method>
+    <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="f" type="org.apache.hadoop.fs.Path"/>
+      <param name="bufferSize" type="int"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This optional operation is not yet supported.]]>
+      </doc>
+    </method>
+    <method name="delete" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="recursive" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getUri" return="java.net.URI"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="mkdirs" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="rename" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="dst" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setWorkingDirectory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newDir" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_BUFFER_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_BLOCK_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMEOUT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_FTP_USER_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_FTP_HOST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_FTP_HOST_PORT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_FTP_PASSWORD_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_FTP_DATA_CONNECTION_MODE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_FTP_TRANSFER_MODE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="E_SAME_DIRECTORY_ONLY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_FTP_TIMEOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<p>
+ A {@link FileSystem} backed by an FTP client provided by <a
+ href="http://commons.apache.org/net/">Apache Commons Net</a>.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.ftp.FTPFileSystem -->
+</package>
+<package name="org.apache.hadoop.fs.http">
+</package>
+<package name="org.apache.hadoop.fs.protocolPB">
+</package>
+<package name="org.apache.hadoop.fs.sftp">
+</package>
+<package name="org.apache.hadoop.fs.shell.find">
+</package>
+<package name="org.apache.hadoop.fs.statistics">
+  <!-- start class org.apache.hadoop.fs.statistics.DurationStatisticSummary -->
+  <class name="DurationStatisticSummary" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Serializable"/>
+    <constructor name="DurationStatisticSummary" type="java.lang.String, boolean, long, long, long, org.apache.hadoop.fs.statistics.MeanStatistic"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor.
+ @param key Statistic key.
+ @param success Are these success or failure statistics.
+ @param count Count of operation invocations.
+ @param max Max duration; -1 if unknown.
+ @param min Min duration; -1 if unknown.
+ @param mean Mean duration -may be null. (will be cloned)]]>
+      </doc>
+    </constructor>
+    <method name="getKey" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isSuccess" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCount" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getMax" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getMin" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getMean" return="org.apache.hadoop.fs.statistics.MeanStatistic"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="fetchDurationSummary" return="org.apache.hadoop.fs.statistics.DurationStatisticSummary"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="source" type="org.apache.hadoop.fs.statistics.IOStatistics"/>
+      <param name="key" type="java.lang.String"/>
+      <param name="success" type="boolean"/>
+      <doc>
+      <![CDATA[Fetch the duration timing summary of success or failure operations
+ from an IO Statistics source.
+ If the duration key is unknown, the summary will be incomplete.
+ @param source source of data
+ @param key duration statistic key
+ @param success fetch success statistics, or if false, failure stats.
+ @return a summary of the statistics.]]>
+      </doc>
+    </method>
+    <method name="fetchSuccessSummary" return="org.apache.hadoop.fs.statistics.DurationStatisticSummary"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="source" type="org.apache.hadoop.fs.statistics.IOStatistics"/>
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Fetch the duration timing summary from an IOStatistics source.
+ If the duration key is unknown, the summary will be incomplete.
+ @param source source of data
+ @param key duration statistic key
+ @return a summary of the statistics.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Summary of duration tracking statistics
+ as extracted from an IOStatistics instance.
+ <p>
+ This is for reporting and testing.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.statistics.DurationStatisticSummary -->
+  <!-- start interface org.apache.hadoop.fs.statistics.IOStatistics -->
+  <interface name="IOStatistics"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="counters" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Map of counters.
+ @return the current map of counters.]]>
+      </doc>
+    </method>
+    <method name="gauges" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Map of gauges.
+ @return the current map of gauges.]]>
+      </doc>
+    </method>
+    <method name="minimums" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Map of minimums.
+ @return the current map of minimums.]]>
+      </doc>
+    </method>
+    <method name="maximums" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Map of maximums.
+ @return the current map of maximums.]]>
+      </doc>
+    </method>
+    <method name="meanStatistics" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Map of meanStatistics.
+ @return the current map of MeanStatistic statistics.]]>
+      </doc>
+    </method>
+    <field name="MIN_UNSET_VALUE" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Value when a minimum value has never been set.]]>
+      </doc>
+    </field>
+    <field name="MAX_UNSET_VALUE" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Value when a max value has never been set.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[IO Statistics.
+ <p>
+ These are low-cost per-instance statistics provided by any Hadoop
+ I/O class instance.
+ <p>
+ Consult the filesystem specification document for the requirements
+ of an implementation of this interface.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.statistics.IOStatistics -->
+  <!-- start interface org.apache.hadoop.fs.statistics.IOStatisticsAggregator -->
+  <interface name="IOStatisticsAggregator"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="aggregate" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="statistics" type="org.apache.hadoop.fs.statistics.IOStatistics"/>
+      <doc>
+      <![CDATA[Aggregate the supplied statistics into the current
+ set.
+
+ @param statistics statistics; may be null
+ @return true if the statistics reference was not null and
+ so aggregated.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface exported by classes which support
+ aggregation of {@link IOStatistics}.
+ Implementations MAY aggregate all statistics
+ exported by the IOStatistics reference passed in to
+ {@link #aggregate(IOStatistics)}, or they
+ may selectively aggregate specific values/classes
+ of statistics.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.fs.statistics.IOStatisticsAggregator -->
+  <!-- start class org.apache.hadoop.fs.statistics.IOStatisticsLogging -->
+  <class name="IOStatisticsLogging" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="ioStatisticsSourceToString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="source" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Extract the statistics from a source object -or ""
+ if it is not an instance of {@link IOStatistics},
+ {@link IOStatisticsSource} or the retrieved
+ statistics are null.
+ <p>
+ Exceptions are caught and downgraded to debug logging.
+ @param source source of statistics.
+ @return a string for logging.]]>
+      </doc>
+    </method>
+    <method name="ioStatisticsToString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="statistics" type="org.apache.hadoop.fs.statistics.IOStatistics"/>
+      <doc>
+      <![CDATA[Convert IOStatistics to a string form.
+ @param statistics A statistics instance.
+ @return string value or the empty string if null]]>
+      </doc>
+    </method>
+    <method name="ioStatisticsToPrettyString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="statistics" type="org.apache.hadoop.fs.statistics.IOStatistics"/>
+      <doc>
+      <![CDATA[Convert IOStatistics to a string form, with all the metrics sorted
+ and empty value stripped.
+ This is more expensive than the simple conversion, so should only
+ be used for logging/output where it's known/highly likely that the
+ caller wants to see the values. Not for debug logging.
+ @param statistics A statistics instance.
+ @return string value or the empty string if null]]>
+      </doc>
+    </method>
+    <method name="demandStringifyIOStatisticsSource" return="java.lang.Object"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="source" type="org.apache.hadoop.fs.statistics.IOStatisticsSource"/>
+      <doc>
+      <![CDATA[On demand stringifier of an IOStatisticsSource instance.
+ <p>
+ Whenever this object's toString() method is called, it evaluates the
+ statistics.
+ <p>
+ This is designed to be affordable to use in log statements.
+ @param source source of statistics -may be null.
+ @return an object whose toString() operation returns the current values.]]>
+      </doc>
+    </method>
+    <method name="demandStringifyIOStatistics" return="java.lang.Object"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="statistics" type="org.apache.hadoop.fs.statistics.IOStatistics"/>
+      <doc>
+      <![CDATA[On demand stringifier of an IOStatistics instance.
+ <p>
+ Whenever this object's toString() method is called, it evaluates the
+ statistics.
+ <p>
+ This is for use in log statements where for the cost of creation
+ of this entry is low; it is affordable to use in log statements.
+ @param statistics statistics to stringify -may be null.
+ @return an object whose toString() operation returns the current values.]]>
+      </doc>
+    </method>
+    <method name="logIOStatisticsAtDebug"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="log" type="org.slf4j.Logger"/>
+      <param name="message" type="java.lang.String"/>
+      <param name="source" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Extract any statistics from the source and log at debug, if
+ the log is set to log at debug.
+ No-op if logging is not at debug or the source is null/of
+ the wrong type/doesn't provide statistics.
+ @param log log to log to
+ @param message message for log -this must contain "{}" for the
+ statistics report to actually get logged.
+ @param source source object]]>
+      </doc>
+    </method>
+    <method name="logIOStatisticsAtDebug"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="message" type="java.lang.String"/>
+      <param name="source" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Extract any statistics from the source and log to
+ this class's log at debug, if
+ the log is set to log at debug.
+ No-op if logging is not at debug or the source is null/of
+ the wrong type/doesn't provide statistics.
+ @param message message for log -this must contain "{}" for the
+ statistics report to actually get logged.
+ @param source source object]]>
+      </doc>
+    </method>
+    <method name="logIOStatisticsAtLevel"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="log" type="org.slf4j.Logger"/>
+      <param name="level" type="java.lang.String"/>
+      <param name="source" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[A method to log IOStatistics from a source at different levels.
+
+ @param log    Logger for logging.
+ @param level  LOG level.
+ @param source Source to LOG.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Utility operations convert IO Statistics sources/instances
+ to strings, especially for robust logging.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.statistics.IOStatisticsLogging -->
+  <!-- start class org.apache.hadoop.fs.statistics.IOStatisticsSnapshot -->
+  <class name="IOStatisticsSnapshot" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.fs.statistics.IOStatistics"/>
+    <implements name="java.io.Serializable"/>
+    <implements name="org.apache.hadoop.fs.statistics.IOStatisticsAggregator"/>
+    <constructor name="IOStatisticsSnapshot"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct.]]>
+      </doc>
+    </constructor>
+    <constructor name="IOStatisticsSnapshot" type="org.apache.hadoop.fs.statistics.IOStatistics"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct, taking a snapshot of the source statistics data
+ if the source is non-null.
+ If the source is null, the empty maps are created.
+ @param source statistics source. Nullable.]]>
+      </doc>
+    </constructor>
+    <method name="clear"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Clear all the maps.]]>
+      </doc>
+    </method>
+    <method name="snapshot"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="source" type="org.apache.hadoop.fs.statistics.IOStatistics"/>
+      <doc>
+      <![CDATA[Take a snapshot.
+
+ This completely overwrites the map data with the statistics
+ from the source.
+ @param source statistics source.]]>
+      </doc>
+    </method>
+    <method name="aggregate" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="source" type="org.apache.hadoop.fs.statistics.IOStatistics"/>
+      <doc>
+      <![CDATA[Aggregate the current statistics with the
+ source reference passed in.
+
+ The operation is synchronized.
+ @param source source; may be null
+ @return true if a merge took place.]]>
+      </doc>
+    </method>
+    <method name="counters" return="java.util.Map"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="gauges" return="java.util.Map"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="minimums" return="java.util.Map"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="maximums" return="java.util.Map"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="meanStatistics" return="java.util.Map"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="serializer" return="org.apache.hadoop.util.JsonSerialization"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a JSON serializer for this class.
+ @return a serializer.]]>
+      </doc>
+    </method>
+    <method name="requiredSerializationClasses" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[What classes are needed to deserialize this class?
+ Needed to securely unmarshall this from untrusted sources.
+ @return a list of required classes to deserialize the data.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Snapshot of statistics from a different source.
+ <p>
+ It is serializable so that frameworks which can use java serialization
+ to propagate data (Spark, Flink...) can send the statistics
+ back. For this reason, TreeMaps are explicitly used as field types,
+ even though IDEs can recommend use of Map instead.
+ For security reasons, untrusted java object streams should never be
+ deserialized. If for some reason this is required, use
+ {@link #requiredSerializationClasses()} to get the list of classes
+ used when deserializing instances of this object.
+ <p>
+ <p>
+ It is annotated for correct serializations with jackson2.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.statistics.IOStatisticsSnapshot -->
+  <!-- start class org.apache.hadoop.fs.statistics.IOStatisticsSupport -->
+  <class name="IOStatisticsSupport" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="snapshotIOStatistics" return="org.apache.hadoop.fs.statistics.IOStatisticsSnapshot"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="statistics" type="org.apache.hadoop.fs.statistics.IOStatistics"/>
+      <doc>
+      <![CDATA[Take a snapshot of the current statistics state.
+ <p>
+ This is not an atomic option.
+ <p>
+ The instance can be serialized, and its
+ {@code toString()} method lists all the values.
+ @param statistics statistics
+ @return a snapshot of the current values.]]>
+      </doc>
+    </method>
+    <method name="snapshotIOStatistics" return="org.apache.hadoop.fs.statistics.IOStatisticsSnapshot"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a snapshot statistics instance ready to aggregate data.
+
+ The instance can be serialized, and its
+ {@code toString()} method lists all the values.
+ @return an empty snapshot]]>
+      </doc>
+    </method>
+    <method name="retrieveIOStatistics" return="org.apache.hadoop.fs.statistics.IOStatistics"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="source" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Get the IOStatistics of the source, casting it
+ if it is of the relevant type, otherwise,
+ if it implements {@link IOStatisticsSource}
+ extracting the value.
+
+ Returns null if the source isn't of the right type
+ or the return value of
+ {@link IOStatisticsSource#getIOStatistics()} was null.
+ @return an IOStatistics instance or null]]>
+      </doc>
+    </method>
+    <method name="stubDurationTrackerFactory" return="org.apache.hadoop.fs.statistics.DurationTrackerFactory"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return a stub duration tracker factory whose returned trackers
+ are always no-ops.
+
+ As singletons are returned, this is very low-cost to use.
+ @return a duration tracker factory.]]>
+      </doc>
+    </method>
+    <method name="stubDurationTracker" return="org.apache.hadoop.fs.statistics.DurationTracker"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a stub duration tracker.
+ @return a stub tracker.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Support for working with IOStatistics.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.statistics.IOStatisticsSupport -->
+  <!-- start class org.apache.hadoop.fs.statistics.MeanStatistic -->
+  <class name="MeanStatistic" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Serializable"/>
+    <implements name="java.lang.Cloneable"/>
+    <constructor name="MeanStatistic" type="long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor, with some resilience against invalid sample counts.
+ If the sample count is 0 or less, the sum is set to 0 and
+ the sample count to 0.
+ @param samples sample count.
+ @param sum sum value]]>
+      </doc>
+    </constructor>
+    <constructor name="MeanStatistic" type="org.apache.hadoop.fs.statistics.MeanStatistic"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create from another statistic.
+ @param that source]]>
+      </doc>
+    </constructor>
+    <constructor name="MeanStatistic"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an empty statistic.]]>
+      </doc>
+    </constructor>
+    <method name="getSum" return="long"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the sum of samples.
+ @return the sum]]>
+      </doc>
+    </method>
+    <method name="getSamples" return="long"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the sample count.
+ @return the sample count; 0 means empty]]>
+      </doc>
+    </method>
+    <method name="isEmpty" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is a statistic empty?
+ @return true if the sample count is 0]]>
+      </doc>
+    </method>
+    <method name="clear"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Set the values to 0.]]>
+      </doc>
+    </method>
+    <method name="setSamplesAndSum"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="sampleCount" type="long"/>
+      <param name="newSum" type="long"/>
+      <doc>
+      <![CDATA[Set the sum and samples.
+ Synchronized.
+ @param sampleCount new sample count.
+ @param newSum new sum]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.fs.statistics.MeanStatistic"/>
+      <doc>
+      <![CDATA[Set the statistic to the values of another.
+ Synchronized.
+ @param other the source.]]>
+      </doc>
+    </method>
+    <method name="setSum"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="sum" type="long"/>
+      <doc>
+      <![CDATA[Set the sum.
+ @param sum new sum]]>
+      </doc>
+    </method>
+    <method name="setSamples"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="samples" type="long"/>
+      <doc>
+      <![CDATA[Set the sample count.
+
+ If this is less than zero, it is set to zero.
+ This stops an ill-formed JSON entry from
+ breaking deserialization, or get an invalid sample count
+ into an entry.
+ @param samples sample count.]]>
+      </doc>
+    </method>
+    <method name="mean" return="double"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the arithmetic mean value.
+ @return the mean]]>
+      </doc>
+    </method>
+    <method name="add" return="org.apache.hadoop.fs.statistics.MeanStatistic"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.fs.statistics.MeanStatistic"/>
+      <doc>
+      <![CDATA[Add another MeanStatistic.
+ @param other other value]]>
+      </doc>
+    </method>
+    <method name="addSample"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Add a sample.
+ Thread safe.
+ @param value value to add to the sum]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The hash code is derived from the mean
+ and sample count: if either is changed
+ the statistic cannot be used as a key
+ for hash tables/maps.
+ @return a hash value]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="clone" return="org.apache.hadoop.fs.statistics.MeanStatistic"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="copy" return="org.apache.hadoop.fs.statistics.MeanStatistic"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a copy of this instance.
+ @return copy.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A mean statistic represented as the sum and the sample count;
+ the mean is calculated on demand.
+ <p>
+ It can be used to accrue values so as to dynamically update
+ the mean. If so, know that there is no synchronization
+ on the methods.
+ </p>
+ <p>
+ If a statistic has 0 samples then it is considered to be empty.
+ </p>
+ <p>
+ All 'empty' statistics are equivalent, independent of the sum value.
+ </p>
+ <p>
+ For non-empty statistics, sum and sample values must match
+ for equality.
+ </p>
+ <p>
+ It is serializable and annotated for correct serializations with jackson2.
+ </p>
+ <p>
+ Thread safety. The operations to add/copy sample data, are thread safe.
+ </p>
+ <ol>
+   <li>{@link #add(MeanStatistic)}</li>
+   <li>{@link #addSample(long)} </li>
+   <li>{@link #clear()} </li>
+   <li>{@link #setSamplesAndSum(long, long)}</li>
+   <li>{@link #set(MeanStatistic)}</li>
+   <li>{@link #setSamples(long)} and {@link #setSum(long)}</li>
+ </ol>
+ <p>
+ So is the {@link #mean()} method. This ensures that when
+ used to aggregated statistics, the aggregate value and sample
+ count are set and evaluated consistently.
+ </p>
+ <p>
+   Other methods marked as synchronized because Findbugs overreacts
+   to the idea that some operations to update sum and sample count
+   are synchronized, but that things like equals are not.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.statistics.MeanStatistic -->
+  <!-- start class org.apache.hadoop.fs.statistics.StoreStatisticNames -->
+  <class name="StoreStatisticNames" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <field name="OP_ABORT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_ACCESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[access() API call {@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_APPEND" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_COPY_FROM_LOCAL_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_CREATE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_CREATE_NON_RECURSIVE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_DELETE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_EXISTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_GET_CONTENT_SUMMARY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_GET_DELEGATION_TOKEN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_GET_FILE_CHECKSUM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_GET_FILE_STATUS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_GET_STATUS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_GLOB_STATUS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_IS_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_HFLUSH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_HSYNC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_IS_DIRECTORY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_LIST_FILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_LIST_LOCATED_STATUS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_LIST_STATUS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_MKDIRS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_MODIFY_ACL_ENTRIES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_OPEN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_REMOVE_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_REMOVE_ACL_ENTRIES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_REMOVE_DEFAULT_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_RENAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_SET_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_SET_OWNER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_SET_PERMISSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_SET_TIMES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_TRUNCATE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_XATTR_GET_MAP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Invoke {@code getXAttrs(Path path)}: {@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_XATTR_GET_NAMED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Invoke {@code getXAttr(Path, String)}: {@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_XATTR_GET_NAMED_MAP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Invoke {@code getXAttrs(Path path, List<String> names)}: {@value}.]]>
+      </doc>
+    </field>
+    <field name="OP_XATTR_LIST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Invoke {@code listXAttrs(Path path)}: {@value}.]]>
+      </doc>
+    </field>
+    <field name="DELEGATION_TOKENS_ISSUED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@value}.]]>
+      </doc>
+    </field>
+    <field name="STORE_EXISTS_PROBE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Probe for store existing: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STORE_IO_THROTTLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Requests throttled and retried: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STORE_IO_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Requests made of a store: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STORE_IO_RETRY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[IO retried: {@value}.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_LIST_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A store's equivalent of a paged LIST request was initiated: {@value}.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_CONTINUE_LIST_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of continued object listings made.
+ Value :{@value}.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_BULK_DELETE_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A bulk DELETE request was made: {@value}.
+ A separate statistic from {@link #OBJECT_DELETE_REQUEST}
+ so that metrics on duration of the operations can
+ be distinguished.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_DELETE_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A store's equivalent of a DELETE request was made: {@value}.
+ This may be an HTTP DELETE verb, or it may be some custom
+ operation which takes a list of objects to delete.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_DELETE_OBJECTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The count of objects deleted in delete requests.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_MULTIPART_UPLOAD_INITIATED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Object multipart upload initiated.
+ Value :{@value}.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_MULTIPART_UPLOAD_ABORTED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Object multipart upload aborted.
+ Value :{@value}.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_PUT_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Object put/multipart upload count.
+ Value :{@value}.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_PUT_REQUEST_COMPLETED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Object put/multipart upload completed count.
+ Value :{@value}.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_PUT_REQUEST_ACTIVE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Current number of active put requests.
+ Value :{@value}.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_PUT_BYTES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[number of bytes uploaded.
+ Value :{@value}.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_PUT_BYTES_PENDING" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[number of bytes queued for upload/being actively uploaded.
+ Value :{@value}.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_SELECT_REQUESTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of S3 Select (or similar) requests issued.
+ Value :{@value}.]]>
+      </doc>
+    </field>
+    <field name="SUFFIX_MIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Suffix to use for a minimum value when
+ the same key is shared across min/mean/max
+ statistics.
+
+ Value {@value}.]]>
+      </doc>
+    </field>
+    <field name="SUFFIX_MAX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Suffix to use for a maximum value when
+ the same key is shared across max/mean/max
+ statistics.
+
+ Value {@value}.]]>
+      </doc>
+    </field>
+    <field name="SUFFIX_MEAN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Suffix to use for a mean statistic value when
+ the same key is shared across mean/mean/max
+ statistics.
+
+ Value {@value}.]]>
+      </doc>
+    </field>
+    <field name="SUFFIX_FAILURES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[String to add to counters and other stats to track failures.
+ This comes before the .min/.mean//max suffixes.
+
+ Value {@value}.]]>
+      </doc>
+    </field>
+    <field name="ACTION_EXECUTOR_ACQUIRED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The name of the statistic collected for executor acquisition if
+ a duration tracker factory is passed in to the constructor.
+ {@value}.]]>
+      </doc>
+    </field>
+    <field name="ACTION_HTTP_HEAD_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[An HTTP HEAD request was made: {@value}.]]>
+      </doc>
+    </field>
+    <field name="ACTION_HTTP_GET_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[An HTTP GET request was made: {@value}.]]>
+      </doc>
+    </field>
+    <field name="ACTION_HTTP_DELETE_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[An HTTP DELETE request was made: {@value}.]]>
+      </doc>
+    </field>
+    <field name="ACTION_HTTP_PUT_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[An HTTP PUT request was made: {@value}.]]>
+      </doc>
+    </field>
+    <field name="ACTION_HTTP_PATCH_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[An HTTP PATCH request was made: {@value}.]]>
+      </doc>
+    </field>
+    <field name="ACTION_HTTP_POST_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[An HTTP POST request was made: {@value}.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_METADATA_REQUESTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[An HTTP HEAD request was made: {@value}.]]>
+      </doc>
+    </field>
+    <field name="OBJECT_COPY_REQUESTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="STORE_IO_THROTTLE_RATE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MULTIPART_UPLOAD_INSTANTIATED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MULTIPART_UPLOAD_PART_PUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MULTIPART_UPLOAD_PART_PUT_BYTES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MULTIPART_UPLOAD_ABORTED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MULTIPART_UPLOAD_ABORT_UNDER_PATH_INVOKED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MULTIPART_UPLOAD_COMPLETED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MULTIPART_UPLOAD_STARTED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MULTIPART_UPLOAD_LIST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Common statistic names for object store operations..
+ <p>
+ When adding new common statistic name constants, please make them unique.
+ By convention:
+ </p>
+ <ul>
+   <li>the name of the constants are uppercase, words separated by
+   underscores.</li>
+   <li>the value of the constants are lowercase of the constant names.</li>
+ </ul>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.statistics.StoreStatisticNames -->
+  <!-- start class org.apache.hadoop.fs.statistics.StreamStatisticNames -->
+  <class name="StreamStatisticNames" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <field name="STREAM_READ_ABORTED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of times the TCP stream was aborted.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_BYTES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Bytes read from an input stream in read() calls.
+ Does not include bytes read and then discarded in seek/close etc.
+ These are the bytes returned to the caller.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_BYTES_DISCARDED_ABORT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of bytes discarded by aborting an input stream .
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_BYTES_DISCARDED_CLOSE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of bytes read and discarded when closing an input stream.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_CLOSED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of times the TCP stream was closed.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_CLOSE_OPERATIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Total count of times an attempt to close an input stream was made
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_OPENED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Total count of times an input stream to was opened.
+ For object stores, that means the count a GET request was initiated.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_EXCEPTIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of exceptions raised during input stream reads.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_FULLY_OPERATIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of readFully() operations in an input stream.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_OPERATIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of read() operations in an input stream.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_OPERATIONS_INCOMPLETE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of incomplete read() operations in an input stream,
+ that is, when the bytes returned were less than that requested.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_VERSION_MISMATCHES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of version mismatches encountered while reading an input stream.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_SEEK_BACKWARD_OPERATIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of executed seek operations which went backwards in a stream.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_SEEK_BYTES_BACKWARDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of bytes moved backwards during seek operations
+ in an input stream.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_SEEK_BYTES_DISCARDED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of bytes read and discarded during seek() in an input stream.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_SEEK_BYTES_SKIPPED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of bytes skipped during forward seek operations.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_SEEK_FORWARD_OPERATIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of executed seek operations which went forward in
+ an input stream.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_SEEK_POLICY_CHANGED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of times the seek policy was dynamically changed
+ in an input stream.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_SEEK_OPERATIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of seek operations in an input stream.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_SKIP_OPERATIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of {@code InputStream.skip()} calls.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_SKIP_BYTES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count bytes skipped in {@code InputStream.skip()} calls.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_TOTAL_BYTES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Total count of bytes read from an input stream.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_READ_UNBUFFERED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of calls of {@code CanUnbuffer.unbuffer()}.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_WRITE_EXCEPTIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA["Count of stream write failures reported.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_WRITE_EXCEPTIONS_COMPLETING_UPLOADS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of failures when finalizing a multipart upload:
+ {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_WRITE_BLOCK_UPLOADS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of block/partition uploads complete.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_WRITE_BLOCK_UPLOADS_ABORTED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of number of block uploads aborted.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_WRITE_BLOCK_UPLOADS_ACTIVE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of block/partition uploads active.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_WRITE_BLOCK_UPLOADS_BYTES_PENDING" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gauge of data queued to be written.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_WRITE_BLOCK_UPLOADS_COMMITTED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of number of block uploads committed.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_WRITE_BLOCK_UPLOADS_PENDING" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gauge of block/partitions uploads queued to be written.
+ Value: {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_WRITE_BYTES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA["Count of bytes written to output stream including all not yet uploaded.
+ {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_WRITE_TOTAL_TIME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count of total time taken for uploads to complete.
+ {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_WRITE_QUEUE_DURATION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Total queue duration of all block uploads.
+ {@value}.]]>
+      </doc>
+    </field>
+    <field name="STREAM_WRITE_TOTAL_DATA" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="BYTES_TO_UPLOAD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of bytes to upload from an OutputStream.]]>
+      </doc>
+    </field>
+    <field name="BYTES_UPLOAD_SUCCESSFUL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of bytes uploaded successfully to the object store.]]>
+      </doc>
+    </field>
+    <field name="BYTES_UPLOAD_FAILED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of bytes failed to upload to the object store.]]>
+      </doc>
+    </field>
+    <field name="TIME_SPENT_ON_TASK_WAIT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Total time spent on waiting for a task to complete.]]>
+      </doc>
+    </field>
+    <field name="QUEUE_SHRUNK_OPS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of task queue shrunk operations.]]>
+      </doc>
+    </field>
+    <field name="WRITE_CURRENT_BUFFER_OPERATIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of times current buffer is written to the service.]]>
+      </doc>
+    </field>
+    <field name="TIME_SPENT_ON_PUT_REQUEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Total time spent on completing a PUT request.]]>
+      </doc>
+    </field>
+    <field name="SEEK_IN_BUFFER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of seeks in buffer.]]>
+      </doc>
+    </field>
+    <field name="BYTES_READ_BUFFER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of bytes read from the buffer.]]>
+      </doc>
+    </field>
+    <field name="REMOTE_READ_OP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Total number of remote read operations performed.]]>
+      </doc>
+    </field>
+    <field name="READ_AHEAD_BYTES_READ" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Total number of bytes read from readAhead.]]>
+      </doc>
+    </field>
+    <field name="REMOTE_BYTES_READ" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Total number of bytes read from remote operations.]]>
+      </doc>
+    </field>
+    <field name="BLOCKS_ALLOCATED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Total number of Data blocks allocated by an outputStream.]]>
+      </doc>
+    </field>
+    <field name="BLOCKS_RELEASED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Total number of Data blocks released by an outputStream.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[These are common statistic names.
+ <p>
+ When adding new common statistic name constants, please make them unique.
+ By convention, they are implicitly unique:
+ <ul>
+   <li>
+     The name of the constants are uppercase, words separated by
+     underscores.
+   </li>
+   <li>
+     The value of the constants are lowercase of the constant names.
+   </li>
+ </ul>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.fs.statistics.StreamStatisticNames -->
+</package>
+<package name="org.apache.hadoop.ha">
+  <!-- start class org.apache.hadoop.ha.BadFencingConfigurationException -->
+  <class name="BadFencingConfigurationException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BadFencingConfigurationException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="BadFencingConfigurationException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Indicates that the operator has specified an invalid configuration
+ for fencing methods.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.ha.BadFencingConfigurationException -->
+  <!-- start class org.apache.hadoop.ha.FailoverFailedException -->
+  <class name="FailoverFailedException" extends="java.lang.Exception"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FailoverFailedException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FailoverFailedException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Exception thrown to indicate service failover has failed.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.ha.FailoverFailedException -->
+  <!-- start interface org.apache.hadoop.ha.FenceMethod -->
+  <interface name="FenceMethod"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="checkArgs"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String"/>
+      <exception name="BadFencingConfigurationException" type="org.apache.hadoop.ha.BadFencingConfigurationException"/>
+      <doc>
+      <![CDATA[Verify that the given fencing method's arguments are valid.
+ @param args the arguments provided in the configuration. This may
+        be null if the operator did not configure any arguments.
+ @throws BadFencingConfigurationException if the arguments are invalid]]>
+      </doc>
+    </method>
+    <method name="tryFence" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="target" type="org.apache.hadoop.ha.HAServiceTarget"/>
+      <param name="args" type="java.lang.String"/>
+      <exception name="BadFencingConfigurationException" type="org.apache.hadoop.ha.BadFencingConfigurationException"/>
+      <doc>
+      <![CDATA[Attempt to fence the target node.
+ @param target the target of the service to fence
+ @param args the configured arguments, which were checked at startup by
+             {@link #checkArgs(String)}
+ @return true if fencing was successful, false if unsuccessful or
+              indeterminate
+ @throws BadFencingConfigurationException if the configuration was
+         determined to be invalid only at runtime]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A fencing method is a method by which one node can forcibly prevent
+ another node from making continued progress. This might be implemented
+ by killing a process on the other node, by denying the other node's
+ access to shared storage, or by accessing a PDU to cut the other node's
+ power.
+ <p>
+ Since these methods are often vendor- or device-specific, operators
+ may implement this interface in order to achieve fencing.
+ <p>
+ Fencing is configured by the operator as an ordered list of methods to
+ attempt. Each method will be tried in turn, and the next in the list
+ will only be attempted if the previous one fails. See {@link NodeFencer}
+ for more information.
+ <p>
+ If an implementation also implements {@link Configurable} then its
+ <code>setConf</code> method will be called upon instantiation.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.ha.FenceMethod -->
+  <!-- start interface org.apache.hadoop.ha.HAServiceProtocol -->
+  <interface name="HAServiceProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="monitorHealth"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="HealthCheckFailedException" type="org.apache.hadoop.ha.HealthCheckFailedException"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Monitor the health of service. This periodically called by the HA
+ frameworks to monitor the health of the service.
+ 
+ Service is expected to perform checks to ensure it is functional.
+ If the service is not healthy due to failure or partial failure,
+ it is expected to throw {@link HealthCheckFailedException}.
+ The definition of service not healthy is left to the service.
+ 
+ Note that when health check of an Active service fails,
+ failover to standby may be done.
+ 
+ @throws HealthCheckFailedException
+           if the health check of a service fails.
+ @throws AccessControlException
+           if access is denied.
+ @throws IOException
+           if other errors happen]]>
+      </doc>
+    </method>
+    <method name="transitionToActive"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reqInfo" type="org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo"/>
+      <exception name="ServiceFailedException" type="org.apache.hadoop.ha.ServiceFailedException"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Request service to transition to active state. No operation, if the
+ service is already in active state.
+ 
+ @throws ServiceFailedException
+           if transition from standby to active fails.
+ @throws AccessControlException
+           if access is denied.
+ @throws IOException
+           if other errors happen]]>
+      </doc>
+    </method>
+    <method name="transitionToStandby"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reqInfo" type="org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo"/>
+      <exception name="ServiceFailedException" type="org.apache.hadoop.ha.ServiceFailedException"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Request service to transition to standby state. No operation, if the
+ service is already in standby state.
+ 
+ @throws ServiceFailedException
+           if transition from active to standby fails.
+ @throws AccessControlException
+           if access is denied.
+ @throws IOException
+           if other errors happen]]>
+      </doc>
+    </method>
+    <method name="transitionToObserver"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reqInfo" type="org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo"/>
+      <exception name="ServiceFailedException" type="org.apache.hadoop.ha.ServiceFailedException"/>
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Request service to transition to observer state. No operation, if the
+ service is already in observer state.
+
+ @throws ServiceFailedException
+           if transition from standby to observer fails.
+ @throws AccessControlException
+           if access is denied.
+ @throws IOException
+           if other errors happen]]>
+      </doc>
+    </method>
+    <method name="getServiceStatus" return="org.apache.hadoop.ha.HAServiceStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the current status of the service. The status indicates
+ the current <em>state</em> (e.g ACTIVE/STANDBY) as well as
+ some additional information.
+ 
+ @throws AccessControlException
+           if access is denied.
+ @throws IOException
+           if other errors happen
+ @see HAServiceStatus]]>
+      </doc>
+    </method>
+    <field name="versionID" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initial version of the protocol]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Protocol interface that provides High Availability related primitives to
+ monitor and fail-over the service.
+ 
+ This interface could be used by HA frameworks to manage the service.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.ha.HAServiceProtocol -->
+  <!-- start class org.apache.hadoop.ha.HAServiceProtocolHelper -->
+  <class name="HAServiceProtocolHelper" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="HAServiceProtocolHelper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="monitorHealth"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="svc" type="org.apache.hadoop.ha.HAServiceProtocol"/>
+      <param name="reqInfo" type="org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="transitionToActive"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="svc" type="org.apache.hadoop.ha.HAServiceProtocol"/>
+      <param name="reqInfo" type="org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="transitionToStandby"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="svc" type="org.apache.hadoop.ha.HAServiceProtocol"/>
+      <param name="reqInfo" type="org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="transitionToObserver"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="svc" type="org.apache.hadoop.ha.HAServiceProtocol"/>
+      <param name="reqInfo" type="org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Helper for making {@link HAServiceProtocol} RPC calls. This helper
+ unwraps the {@link RemoteException} to specific exceptions.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.ha.HAServiceProtocolHelper -->
+  <!-- start class org.apache.hadoop.ha.HAServiceTarget -->
+  <class name="HAServiceTarget" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="HAServiceTarget"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAddress" return="java.net.InetSocketAddress"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the IPC address of the target node.]]>
+      </doc>
+    </method>
+    <method name="getHealthMonitorAddress" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns an optional separate RPC server address for health checks at the
+ target node.  If defined, then this address is used by the health monitor
+ for the {@link HAServiceProtocol#monitorHealth()} and
+ {@link HAServiceProtocol#getServiceStatus()} calls.  This can be useful for
+ separating out these calls onto separate RPC handlers to protect against
+ resource exhaustion in the main RPC handler pool.  If null (which is the
+ default implementation), then all RPC calls go to the address defined by
+ {@link #getAddress()}.
+
+ @return IPC address of the lifeline RPC server on the target node, or null
+     if no lifeline RPC server is used]]>
+      </doc>
+    </method>
+    <method name="getZKFCAddress" return="java.net.InetSocketAddress"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the IPC address of the ZKFC on the target node]]>
+      </doc>
+    </method>
+    <method name="getFencer" return="org.apache.hadoop.ha.NodeFencer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return a Fencer implementation configured for this target node]]>
+      </doc>
+    </method>
+    <method name="checkFencingConfigured"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="BadFencingConfigurationException" type="org.apache.hadoop.ha.BadFencingConfigurationException"/>
+      <doc>
+      <![CDATA[@throws BadFencingConfigurationException if the fencing configuration
+ appears to be invalid. This is divorced from the above
+ {@link #getFencer()} method so that the configuration can be checked
+ during the pre-flight phase of failover.]]>
+      </doc>
+    </method>
+    <method name="getProxy" return="org.apache.hadoop.ha.HAServiceProtocol"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="timeoutMs" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@return a proxy to connect to the target HA Service.]]>
+      </doc>
+    </method>
+    <method name="setTransitionTargetHAStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="status" type="org.apache.hadoop.ha.HAServiceProtocol.HAServiceState"/>
+    </method>
+    <method name="getTransitionTargetHAStatus" return="org.apache.hadoop.ha.HAServiceProtocol.HAServiceState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHealthMonitorProxy" return="org.apache.hadoop.ha.HAServiceProtocol"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="timeoutMs" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns a proxy to connect to the target HA service for health monitoring.
+ If {@link #getHealthMonitorAddress()} is implemented to return a non-null
+ address, then this proxy will connect to that address.  Otherwise, the
+ returned proxy defaults to using {@link #getAddress()}, which means this
+ method's behavior is identical to {@link #getProxy(Configuration, int)}.
+
+ @param conf Configuration
+ @param timeoutMs timeout in milliseconds
+ @return a proxy to connect to the target HA service for health monitoring
+ @throws IOException if there is an error]]>
+      </doc>
+    </method>
+    <method name="getHealthMonitorProxy" return="org.apache.hadoop.ha.HAServiceProtocol"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="timeoutMs" type="int"/>
+      <param name="retries" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getZKFCProxy" return="org.apache.hadoop.ha.ZKFCProtocol"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="timeoutMs" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@return a proxy to the ZKFC which is associated with this HA service.]]>
+      </doc>
+    </method>
+    <method name="getFencingParameters" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="addFencingParameters"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="ret" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Hook to allow subclasses to add any parameters they would like to
+ expose to fencing implementations/scripts. Fencing methods are free
+ to use this map as they see fit -- notably, the shell script
+ implementation takes each entry, prepends 'target_', substitutes
+ '_' for '.', and adds it to the environment of the script.
+
+ Subclass implementations should be sure to delegate to the superclass
+ implementation as well as adding their own keys.
+
+ @param ret map which can be mutated to pass parameters to the fencer]]>
+      </doc>
+    </method>
+    <method name="isAutoFailoverEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return true if auto failover should be considered enabled]]>
+      </doc>
+    </method>
+    <method name="supportObserver" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return true if this target supports the Observer state, false otherwise.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Represents a target of the client side HA administration commands.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.ha.HAServiceTarget -->
+  <!-- start class org.apache.hadoop.ha.HealthCheckFailedException -->
+  <class name="HealthCheckFailedException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="HealthCheckFailedException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="HealthCheckFailedException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Exception thrown to indicate that health check of a service failed.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.ha.HealthCheckFailedException -->
+  <!-- start class org.apache.hadoop.ha.ServiceFailedException -->
+  <class name="ServiceFailedException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ServiceFailedException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ServiceFailedException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Exception thrown to indicate that an operation performed
+ to modify the state of a service or application failed.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.ha.ServiceFailedException -->
+</package>
+<package name="org.apache.hadoop.ha.protocolPB">
+  <!-- start interface org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB -->
+  <interface name="HAServiceProtocolPB"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService.BlockingInterface"/>
+    <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+  </interface>
+  <!-- end interface org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB -->
+  <!-- start interface org.apache.hadoop.ha.protocolPB.ZKFCProtocolPB -->
+  <interface name="ZKFCProtocolPB"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.ha.proto.ZKFCProtocolProtos.ZKFCProtocolService.BlockingInterface"/>
+    <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
+  </interface>
+  <!-- end interface org.apache.hadoop.ha.protocolPB.ZKFCProtocolPB -->
+</package>
+<package name="org.apache.hadoop.http.lib">
+  <doc>
+  <![CDATA[This package provides user-selectable (via configuration) classes that add
+functionality to the web UI. They are configured as a list of classes in the
+configuration parameter <b>hadoop.http.filter.initializers</b>.
+
+<ul>
+<li> <b>StaticUserWebFilter</b> - An authorization plugin that makes all
+users a static configured user.
+</ul>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.io">
+  <!-- start class org.apache.hadoop.io.AbstractMapWritable -->
+  <class name="AbstractMapWritable" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="AbstractMapWritable"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[constructor.]]>
+      </doc>
+    </constructor>
+    <method name="addToMap"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="clazz" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Add a Class to the maps if it is not already present.]]>
+      </doc>
+    </method>
+    <method name="getClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="id" type="byte"/>
+      <doc>
+      <![CDATA[@return the Class class for the specified id]]>
+      </doc>
+    </method>
+    <method name="getId" return="byte"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="clazz" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[@return the id for the specified Class]]>
+      </doc>
+    </method>
+    <method name="copy"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.io.Writable"/>
+      <doc>
+      <![CDATA[Used by child copy constructors.]]>
+      </doc>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the conf]]>
+      </doc>
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[@param conf the conf to set]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Abstract base class for MapWritable and SortedMapWritable
+ 
+ Unlike org.apache.nutch.crawl.MapWritable, this class allows creation of
+ MapWritable&lt;Writable, MapWritable&gt; so the CLASS_TO_ID and ID_TO_CLASS
+ maps travel with the class instead of being static.
+ 
+ Class ids range from 1 to 127 so there can be at most 127 distinct classes
+ in any specific map instance.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.AbstractMapWritable -->
+  <!-- start class org.apache.hadoop.io.ArrayFile -->
+  <class name="ArrayFile" extends="org.apache.hadoop.io.MapFile"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ArrayFile"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[A dense file-based mapping from integers to values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.ArrayFile -->
+  <!-- start class org.apache.hadoop.io.ArrayPrimitiveWritable -->
+  <class name="ArrayPrimitiveWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="ArrayPrimitiveWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct an empty instance, for use during Writable read]]>
+      </doc>
+    </constructor>
+    <constructor name="ArrayPrimitiveWritable" type="java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct an instance of known type but no value yet
+ for use with type-specific wrapper classes]]>
+      </doc>
+    </constructor>
+    <constructor name="ArrayPrimitiveWritable" type="java.lang.Object"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Wrap an existing array of primitives
+ @param value - array of primitives]]>
+      </doc>
+    </constructor>
+    <method name="get" return="java.lang.Object"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the original array.  
+ Client must cast it back to type componentType[]
+ (or may use type-specific wrapper classes).
+ @return - original array as Object]]>
+      </doc>
+    </method>
+    <method name="getComponentType" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDeclaredComponentType" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isDeclaredComponentType" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="componentType" type="java.lang.Class"/>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="java.lang.Object"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[This is a wrapper class.  It wraps a Writable implementation around
+ an array of primitives (e.g., int[], long[], etc.), with optimized 
+ wire format, and without creating new objects per element.
+ 
+ This is a wrapper class only; it does not make a copy of the 
+ underlying array.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.ArrayPrimitiveWritable -->
+  <!-- start class org.apache.hadoop.io.ArrayWritable -->
+  <class name="ArrayWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="ArrayWritable" type="java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ArrayWritable" type="java.lang.String[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getValueClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toStrings" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toArray" return="java.lang.Object"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="values" type="org.apache.hadoop.io.Writable[]"/>
+    </method>
+    <method name="get" return="org.apache.hadoop.io.Writable[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A Writable for arrays containing instances of a class. The elements of this
+ writable must all be instances of the same class. If this writable will be
+ the input for a Reducer, you will need to create a subclass that sets the
+ value to be of the proper type.
+
+ For example:
+ <code>
+ public class IntArrayWritable extends ArrayWritable {
+   public IntArrayWritable() { 
+     super(IntWritable.class); 
+   }	
+ }
+ </code>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.ArrayWritable -->
+  <!-- start class org.apache.hadoop.io.BinaryComparable -->
+  <class name="BinaryComparable" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="BinaryComparable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getLength" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return n such that bytes 0..n-1 from {@link #getBytes()} are valid.]]>
+      </doc>
+    </method>
+    <method name="getBytes" return="byte[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return representative byte array for this instance.]]>
+      </doc>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.io.BinaryComparable"/>
+      <doc>
+      <![CDATA[Compare bytes from {@link #getBytes()}.
+ @see org.apache.hadoop.io.WritableComparator#compareBytes(byte[],int,int,byte[],int,int)]]>
+      </doc>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <doc>
+      <![CDATA[Compare bytes from {@link #getBytes()} to those provided.]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Return true if bytes from {@link #getBytes()} match.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return a hash of the bytes returned from {@link #getBytes()}.
+ @see org.apache.hadoop.io.WritableComparator#hashBytes(byte[],int)]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface supported by {@link org.apache.hadoop.io.WritableComparable}
+ types supporting ordering/permutation by a representative set of bytes.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.BinaryComparable -->
+  <!-- start class org.apache.hadoop.io.BloomMapFile -->
+  <class name="BloomMapFile" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BloomMapFile"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="delete"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="BLOOM_FILE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HASH_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class extends {@link MapFile} and provides very much the same
+ functionality. However, it uses dynamic Bloom filters to provide
+ quick membership test for keys, and it offers a fast version of 
+ {@link Reader#get(WritableComparable, Writable)} operation, especially in
+ case of sparsely populated MapFile-s.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.BloomMapFile -->
+  <!-- start class org.apache.hadoop.io.BooleanWritable -->
+  <class name="BooleanWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="BooleanWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="BooleanWritable" type="boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set the value of the BooleanWritable]]>
+      </doc>
+    </method>
+    <method name="get" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the value of the BooleanWritable]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.io.BooleanWritable"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A WritableComparable for booleans.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.BooleanWritable -->
+  <!-- start interface org.apache.hadoop.io.ByteBufferPool -->
+  <interface name="ByteBufferPool"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getBuffer" return="java.nio.ByteBuffer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="direct" type="boolean"/>
+      <param name="length" type="int"/>
+      <doc>
+      <![CDATA[Get a new direct ByteBuffer.  The pool can provide this from
+ removing a buffer from its internal cache, or by allocating a 
+ new buffer.
+
+ @param direct     Whether the buffer should be direct.
+ @param length     The minimum length the buffer will have.
+ @return           A new ByteBuffer.  This ByteBuffer must be direct.
+                   Its capacity can be less than what was requested, but
+                   must be at least 1 byte.]]>
+      </doc>
+    </method>
+    <method name="putBuffer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="buffer" type="java.nio.ByteBuffer"/>
+      <doc>
+      <![CDATA[Release a buffer back to the pool.
+ The pool may choose to put this buffer into its cache.
+
+ @param buffer    a direct bytebuffer]]>
+      </doc>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.ByteBufferPool -->
+  <!-- start class org.apache.hadoop.io.BytesWritable -->
+  <class name="BytesWritable" extends="org.apache.hadoop.io.BinaryComparable"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="BytesWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a zero-size sequence.]]>
+      </doc>
+    </constructor>
+    <constructor name="BytesWritable" type="byte[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a BytesWritable using the byte array as the initial value.
+ @param bytes This array becomes the backing storage for the object.]]>
+      </doc>
+    </constructor>
+    <constructor name="BytesWritable" type="byte[], int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a BytesWritable using the byte array as the initial value
+ and length as the length. Use this constructor if the array is larger
+ than the value it represents.
+ @param bytes This array becomes the backing storage for the object.
+ @param length The number of bytes to use from array.]]>
+      </doc>
+    </constructor>
+    <method name="copyBytes" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a copy of the bytes that is exactly the length of the data.
+ See {@link #getBytes()} for faster access to the underlying array.]]>
+      </doc>
+    </method>
+    <method name="getBytes" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the data backing the BytesWritable. Please use {@link #copyBytes()}
+ if you need the returned array to be precisely the length of the data.
+ @return The data is only valid between 0 and getLength() - 1.]]>
+      </doc>
+    </method>
+    <method name="get" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getBytes()} instead.">
+      <doc>
+      <![CDATA[Get the data from the BytesWritable.
+ @deprecated Use {@link #getBytes()} instead.]]>
+      </doc>
+    </method>
+    <method name="getLength" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current size of the buffer.]]>
+      </doc>
+    </method>
+    <method name="getSize" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getLength()} instead.">
+      <doc>
+      <![CDATA[Get the current size of the buffer.
+ @deprecated Use {@link #getLength()} instead.]]>
+      </doc>
+    </method>
+    <method name="setSize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="size" type="int"/>
+      <doc>
+      <![CDATA[Change the size of the buffer. The values in the old range are preserved
+ and any new values are undefined. The capacity is changed if it is 
+ necessary.
+ @param size The new number of bytes]]>
+      </doc>
+    </method>
+    <method name="getCapacity" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the capacity, which is the maximum size that could handled without
+ resizing the backing storage.
+ @return The number of bytes]]>
+      </doc>
+    </method>
+    <method name="setCapacity"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="new_cap" type="int"/>
+      <doc>
+      <![CDATA[Change the capacity of the backing storage.
+ The data is preserved.
+ @param new_cap The new capacity in bytes.]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newData" type="org.apache.hadoop.io.BytesWritable"/>
+      <doc>
+      <![CDATA[Set the BytesWritable to the contents of the given newData.
+ @param newData the value to set this BytesWritable to.]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newData" type="byte[]"/>
+      <param name="offset" type="int"/>
+      <param name="length" type="int"/>
+      <doc>
+      <![CDATA[Set the value to a copy of the given byte range
+ @param newData the new values to copy in
+ @param offset the offset in newData to start at
+ @param length the number of bytes to copy]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="right_obj" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Are the two byte sequences equal?]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Generate the stream of bytes as hex pairs separated by ' '.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A byte sequence that is usable as a key or value.
+ It is resizable and distinguishes between the size of the sequence and
+ the current capacity. The hash function is the front of the md5 of the 
+ buffer. The sort order is the same as memcmp.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.BytesWritable -->
+  <!-- start class org.apache.hadoop.io.ByteWritable -->
+  <class name="ByteWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="ByteWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ByteWritable" type="byte"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="byte"/>
+      <doc>
+      <![CDATA[Set the value of this ByteWritable.]]>
+      </doc>
+    </method>
+    <method name="get" return="byte"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the value of this ByteWritable.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Returns true iff <code>o</code> is a ByteWritable with the same value.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.io.ByteWritable"/>
+      <doc>
+      <![CDATA[Compares two ByteWritables.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A WritableComparable for a single byte.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.ByteWritable -->
+  <!-- start interface org.apache.hadoop.io.Closeable -->
+  <interface name="Closeable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="use java.io.Closeable">
+    <implements name="java.io.Closeable"/>
+    <doc>
+    <![CDATA[@deprecated use java.io.Closeable]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.Closeable -->
+  <!-- start class org.apache.hadoop.io.CompressedWritable -->
+  <class name="CompressedWritable" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="CompressedWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="ensureInflated"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Must be called by all methods which access fields to ensure that the data
+ has been uncompressed.]]>
+      </doc>
+    </method>
+    <method name="readFieldsCompressed"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Subclasses implement this instead of {@link #readFields(DataInput)}.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeCompressed"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Subclasses implement this instead of {@link #write(DataOutput)}.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A base-class for Writables which store themselves compressed and lazily
+ inflate on field access.  This is useful for large objects whose fields are
+ not to be altered during a map or reduce operation: leaving the field data
+ compressed makes copying the instance from one file to another much
+ faster.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.CompressedWritable -->
+  <!-- start class org.apache.hadoop.io.DataOutputOutputStream -->
+  <class name="DataOutputOutputStream" extends="java.io.OutputStream"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="constructOutputStream" return="java.io.OutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <doc>
+      <![CDATA[Construct an OutputStream from the given DataOutput. If 'out'
+ is already an OutputStream, simply returns it. Otherwise, wraps
+ it in an OutputStream.
+ @param out the DataOutput to wrap
+ @return an OutputStream instance that outputs to 'out']]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[OutputStream implementation that wraps a DataOutput.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.DataOutputOutputStream -->
+  <!-- start class org.apache.hadoop.io.DefaultStringifier -->
+  <class name="DefaultStringifier" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Stringifier"/>
+    <constructor name="DefaultStringifier" type="org.apache.hadoop.conf.Configuration, java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="fromString" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="str" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="T"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="store"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="item" type="K"/>
+      <param name="keyName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Stores the item in the configuration with the given keyName.
+ 
+ @param <K>  the class of the item
+ @param conf the configuration to store
+ @param item the object to be stored
+ @param keyName the name of the key to use
+ @throws IOException : forwards Exceptions from the underlying 
+ {@link Serialization} classes.]]>
+      </doc>
+    </method>
+    <method name="load" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="keyName" type="java.lang.String"/>
+      <param name="itemClass" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Restores the object from the configuration.
+ 
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return restored object
+ @throws IOException : forwards Exceptions from the underlying 
+ {@link Serialization} classes.]]>
+      </doc>
+    </method>
+    <method name="storeArray"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="items" type="K[]"/>
+      <param name="keyName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Stores the array of items in the configuration with the given keyName.
+ 
+ @param <K> the class of the item
+ @param conf the configuration to use 
+ @param items the objects to be stored
+ @param keyName the name of the key to use
+ @throws IndexOutOfBoundsException if the items array is empty
+ @throws IOException : forwards Exceptions from the underlying 
+ {@link Serialization} classes.]]>
+      </doc>
+    </method>
+    <method name="loadArray" return="K[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="keyName" type="java.lang.String"/>
+      <param name="itemClass" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Restores the array of objects from the configuration.
+ 
+ @param <K> the class of the item
+ @param conf the configuration to use
+ @param keyName the name of the key to use
+ @param itemClass the class of the item
+ @return restored object
+ @throws IOException : forwards Exceptions from the underlying 
+ {@link Serialization} classes.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[DefaultStringifier is the default implementation of the {@link Stringifier}
+ interface which stringifies the objects using base64 encoding of the
+ serialized version of the objects. The {@link Serializer} and
+ {@link Deserializer} are obtained from the {@link SerializationFactory}.
+ <br>
+ DefaultStringifier offers convenience methods to store/load objects to/from
+ the configuration.
+ 
+ @param <T> the class of the objects to stringify]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.DefaultStringifier -->
+  <!-- start class org.apache.hadoop.io.DoubleWritable -->
+  <class name="DoubleWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="DoubleWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="DoubleWritable" type="double"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="double"/>
+    </method>
+    <method name="get" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Returns true iff <code>o</code> is a DoubleWritable with the same value.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.io.DoubleWritable"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Writable for Double values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.DoubleWritable -->
+  <!-- start class org.apache.hadoop.io.ElasticByteBufferPool -->
+  <class name="ElasticByteBufferPool" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.ByteBufferPool"/>
+    <constructor name="ElasticByteBufferPool"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getBuffer" return="java.nio.ByteBuffer"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="direct" type="boolean"/>
+      <param name="length" type="int"/>
+    </method>
+    <method name="putBuffer"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="buffer" type="java.nio.ByteBuffer"/>
+    </method>
+    <doc>
+    <![CDATA[This is a simple ByteBufferPool which just creates ByteBuffers as needed.
+ It also caches ByteBuffers after they're released.  It will always return
+ the smallest cached buffer with at least the capacity you request.
+ We don't try to do anything clever here like try to limit the maximum cache
+ size.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.ElasticByteBufferPool -->
+  <!-- start class org.apache.hadoop.io.EnumSetWritable -->
+  <class name="EnumSetWritable" extends="java.util.AbstractCollection"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="EnumSetWritable" type="java.util.EnumSet, java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a new EnumSetWritable. If the <tt>value</tt> argument is null or
+ its size is zero, the <tt>elementType</tt> argument must not be null. If
+ the argument <tt>value</tt>'s size is bigger than zero, the argument
+ <tt>elementType</tt> is not be used.
+ 
+ @param value
+ @param elementType]]>
+      </doc>
+    </constructor>
+    <constructor name="EnumSetWritable" type="java.util.EnumSet"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a new EnumSetWritable. Argument <tt>value</tt> should not be null
+ or empty.
+ 
+ @param value]]>
+      </doc>
+    </constructor>
+    <method name="iterator" return="java.util.Iterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="add" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="e" type="E"/>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="java.util.EnumSet"/>
+      <param name="elementType" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[reset the EnumSetWritable with specified
+ <tt>value</tt> and <tt>elementType</tt>. If the <tt>value</tt> argument
+ is null or its size is zero, the <tt>elementType</tt> argument must not be
+ null. If the argument <tt>value</tt>'s size is bigger than zero, the
+ argument <tt>elementType</tt> is not be used.
+ 
+ @param value
+ @param elementType]]>
+      </doc>
+    </method>
+    <method name="get" return="java.util.EnumSet"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the value of this EnumSetWritable.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Returns true if <code>o</code> is an EnumSetWritable with the same value,
+ or both are null.]]>
+      </doc>
+    </method>
+    <method name="getElementType" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the class of all the elements of the underlying EnumSetWriable. It
+ may return null.
+ 
+ @return the element class]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <doc>
+    <![CDATA[A Writable wrapper for EnumSet.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.EnumSetWritable -->
+  <!-- start class org.apache.hadoop.io.FloatWritable -->
+  <class name="FloatWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="FloatWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FloatWritable" type="float"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="float"/>
+      <doc>
+      <![CDATA[Set the value of this FloatWritable.]]>
+      </doc>
+    </method>
+    <method name="get" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the value of this FloatWritable.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Returns true iff <code>o</code> is a FloatWritable with the same value.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.io.FloatWritable"/>
+      <doc>
+      <![CDATA[Compares two FloatWritables.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A WritableComparable for floats.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.FloatWritable -->
+  <!-- start class org.apache.hadoop.io.GenericWritable -->
+  <class name="GenericWritable" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="GenericWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="org.apache.hadoop.io.Writable"/>
+      <doc>
+      <![CDATA[Set the instance that is wrapped.
+ 
+ @param obj]]>
+      </doc>
+    </method>
+    <method name="get" return="org.apache.hadoop.io.Writable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the wrapped instance.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getTypes" return="java.lang.Class[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return all classes that may be wrapped.  Subclasses should implement this
+ to return a constant array of classes.]]>
+      </doc>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <doc>
+    <![CDATA[A wrapper for Writable instances.
+ <p>
+ When two sequence files, which have same Key type but different Value
+ types, are mapped out to reduce, multiple Value types is not allowed.
+ In this case, this class can help you wrap instances with different types.
+ </p>
+ 
+ <p>
+ Compared with <code>ObjectWritable</code>, this class is much more effective,
+ because <code>ObjectWritable</code> will append the class declaration as a String 
+ into the output file in every Key-Value pair.
+ </p>
+ 
+ <p>
+ Generic Writable implements {@link Configurable} interface, so that it will be 
+ configured by the framework. The configuration is passed to the wrapped objects
+ implementing {@link Configurable} interface <i>before deserialization</i>. 
+ </p>
+ 
+ how to use it: <br>
+ 1. Write your own class, such as GenericObject, which extends GenericWritable.<br> 
+ 2. Implements the abstract method <code>getTypes()</code>, defines 
+    the classes which will be wrapped in GenericObject in application.
+    Attention: this classes defined in <code>getTypes()</code> method, must
+    implement <code>Writable</code> interface.
+ <br><br>
+ 
+ The code looks like this:
+ <blockquote><pre>
+ public class GenericObject extends GenericWritable {
+ 
+   private static Class[] CLASSES = {
+               ClassType1.class, 
+               ClassType2.class,
+               ClassType3.class,
+               };
+
+   protected Class[] getTypes() {
+       return CLASSES;
+   }
+
+ }
+ </pre></blockquote>
+ 
+ @since Nov 8, 2006]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.GenericWritable -->
+  <!-- start class org.apache.hadoop.io.IntWritable -->
+  <class name="IntWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="IntWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="IntWritable" type="int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="int"/>
+      <doc>
+      <![CDATA[Set the value of this IntWritable.]]>
+      </doc>
+    </method>
+    <method name="get" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the value of this IntWritable.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Returns true iff <code>o</code> is a IntWritable with the same value.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.io.IntWritable"/>
+      <doc>
+      <![CDATA[Compares two IntWritables.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A WritableComparable for ints.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.IntWritable -->
+  <!-- start class org.apache.hadoop.io.IOUtils -->
+  <class name="IOUtils" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="IOUtils"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="copyBytes"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="out" type="java.io.OutputStream"/>
+      <param name="buffSize" type="int"/>
+      <param name="close" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copies from one stream to another.
+
+ @param in InputStrem to read from
+ @param out OutputStream to write to
+ @param buffSize the size of the buffer 
+ @param close whether or not close the InputStream and 
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+      </doc>
+    </method>
+    <method name="copyBytes"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="out" type="java.io.OutputStream"/>
+      <param name="buffSize" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copies from one stream to another.
+ 
+ @param in InputStrem to read from
+ @param out OutputStream to write to
+ @param buffSize the size of the buffer]]>
+      </doc>
+    </method>
+    <method name="copyBytes"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="out" type="java.io.OutputStream"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copies from one stream to another. <strong>closes the input and output streams 
+ at the end</strong>.
+
+ @param in InputStrem to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object]]>
+      </doc>
+    </method>
+    <method name="copyBytes"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="out" type="java.io.OutputStream"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="close" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copies from one stream to another.
+
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param conf the Configuration object
+ @param close whether or not close the InputStream and 
+ OutputStream at the end. The streams are closed in the finally clause.]]>
+      </doc>
+    </method>
+    <method name="copyBytes"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="out" type="java.io.OutputStream"/>
+      <param name="count" type="long"/>
+      <param name="close" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copies count bytes from one stream to another.
+
+ @param in InputStream to read from
+ @param out OutputStream to write to
+ @param count number of bytes to copy
+ @param close whether to close the streams
+ @throws IOException if bytes can not be read or written]]>
+      </doc>
+    </method>
+    <method name="wrappedReadForCompressedData" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="is" type="java.io.InputStream"/>
+      <param name="buf" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Utility wrapper for reading from {@link InputStream}. It catches any errors
+ thrown by the underlying stream (either IO or decompression-related), and
+ re-throws as an IOException.
+ 
+ @param is - InputStream to be read from
+ @param buf - buffer the data is read into
+ @param off - offset within buf
+ @param len - amount of data to be read
+ @return number of bytes read]]>
+      </doc>
+    </method>
+    <method name="readFully"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="buf" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reads len bytes in a loop.
+
+ @param in InputStream to read from
+ @param buf The buffer to fill
+ @param off offset from the buffer
+ @param len the length of bytes to read
+ @throws IOException if it could not read requested number of bytes 
+ for any reason (including EOF)]]>
+      </doc>
+    </method>
+    <method name="skipFully"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="len" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Similar to readFully(). Skips bytes in a loop.
+ @param in The InputStream to skip bytes from
+ @param len number of bytes to skip.
+ @throws IOException if it could not skip requested number of bytes 
+ for any reason (including EOF)]]>
+      </doc>
+    </method>
+    <method name="cleanup"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="use {@link #cleanupWithLogger(Logger, java.io.Closeable...)}
+ instead">
+      <param name="log" type="org.apache.commons.logging.Log"/>
+      <param name="closeables" type="java.io.Closeable[]"/>
+      <doc>
+      <![CDATA[Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
+ null pointers. Must only be used for cleanup in exception handlers.
+
+ @param log the log to record problems to at debug level. Can be null.
+ @param closeables the objects to close
+ @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)}
+ instead]]>
+      </doc>
+    </method>
+    <method name="cleanupWithLogger"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="logger" type="org.slf4j.Logger"/>
+      <param name="closeables" type="java.io.Closeable[]"/>
+      <doc>
+      <![CDATA[Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
+ null pointers. Must only be used for cleanup in exception handlers.
+
+ @param logger the log to record problems to at debug level. Can be null.
+ @param closeables the objects to close]]>
+      </doc>
+    </method>
+    <method name="closeStream"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="stream" type="java.io.Closeable"/>
+      <doc>
+      <![CDATA[Closes the stream ignoring {@link Throwable}.
+ Must only be called in cleaning up from exception handlers.
+
+ @param stream the Stream to close]]>
+      </doc>
+    </method>
+    <method name="closeStreams"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="streams" type="java.io.Closeable[]"/>
+      <doc>
+      <![CDATA[Closes the streams ignoring {@link Throwable}.
+ Must only be called in cleaning up from exception handlers.
+
+ @param streams the Streams to close]]>
+      </doc>
+    </method>
+    <method name="closeSocket"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="sock" type="java.net.Socket"/>
+      <doc>
+      <![CDATA[Closes the socket ignoring {@link IOException}
+
+ @param sock the Socket to close]]>
+      </doc>
+    </method>
+    <method name="writeFully"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bc" type="java.nio.channels.WritableByteChannel"/>
+      <param name="buf" type="java.nio.ByteBuffer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write a ByteBuffer to a WritableByteChannel, handling short writes.
+ 
+ @param bc               The WritableByteChannel to write to
+ @param buf              The input buffer
+ @throws IOException     On I/O error]]>
+      </doc>
+    </method>
+    <method name="writeFully"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fc" type="java.nio.channels.FileChannel"/>
+      <param name="buf" type="java.nio.ByteBuffer"/>
+      <param name="offset" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write a ByteBuffer to a FileChannel at a given offset, 
+ handling short writes.
+ 
+ @param fc               The FileChannel to write to
+ @param buf              The input buffer
+ @param offset           The offset in the file to start writing at
+ @throws IOException     On I/O error]]>
+      </doc>
+    </method>
+    <method name="listDirectory" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="java.io.File"/>
+      <param name="filter" type="java.io.FilenameFilter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the complete list of files in a directory as strings.<p>
+
+ This is better than File#listDir because it does not ignore IOExceptions.
+
+ @param dir              The directory to list.
+ @param filter           If non-null, the filter to use when listing
+                         this directory.
+ @return                 The list of files in the directory.
+
+ @throws IOException     On I/O error]]>
+      </doc>
+    </method>
+    <method name="fsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fileToSync" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Ensure that any writes to the given file is written to the storage device
+ that contains it. This method opens channel on given File and closes it
+ once the sync is done.<br>
+ Borrowed from Uwe Schindler in LUCENE-5588
+ @param fileToSync the file to fsync]]>
+      </doc>
+    </method>
+    <method name="fsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="channel" type="java.nio.channels.FileChannel"/>
+      <param name="isDir" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Ensure that any writes to the given file is written to the storage device
+ that contains it. This method opens channel on given File and closes it
+ once the sync is done.
+ Borrowed from Uwe Schindler in LUCENE-5588
+ @param channel Channel to sync
+ @param isDir if true, the given file is a directory (Channel should be
+          opened for read and ignore IOExceptions, because not all file
+          systems and operating systems allow to fsync on a directory)
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="wrapException" return="java.io.IOException"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="java.lang.String"/>
+      <param name="methodName" type="java.lang.String"/>
+      <param name="exception" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Takes an IOException, file/directory path, and method name and returns an
+ IOException with the input exception as the cause and also include the
+ file,method details. The new exception provides the stack trace of the
+ place where the exception is thrown and some extra diagnostics
+ information.
+
+ Return instance of same exception if exception class has a public string
+ constructor; Otherwise return an PathIOException.
+ InterruptedIOException and PathIOException are returned unwrapped.
+
+ @param path file/directory path
+ @param methodName method name
+ @param exception the caught exception.
+ @return an exception to throw]]>
+      </doc>
+    </method>
+    <method name="readFullyToByteArray" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reads a DataInput until EOF and returns a byte array.  Make sure not to
+ pass in an infinite DataInput or this will never return.
+
+ @param in A DataInput
+ @return a byte array containing the data from the DataInput
+ @throws IOException on I/O error, other than EOF]]>
+      </doc>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An utility class for I/O related functionality.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.IOUtils -->
+  <!-- start class org.apache.hadoop.io.LongWritable -->
+  <class name="LongWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="LongWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="LongWritable" type="long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Set the value of this LongWritable.]]>
+      </doc>
+    </method>
+    <method name="get" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the value of this LongWritable.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Returns true iff <code>o</code> is a LongWritable with the same value.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.io.LongWritable"/>
+      <doc>
+      <![CDATA[Compares two LongWritables.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A WritableComparable for longs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.LongWritable -->
+  <!-- start class org.apache.hadoop.io.MapFile -->
+  <class name="MapFile" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MapFile"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="rename"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="oldName" type="java.lang.String"/>
+      <param name="newName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Renames an existing map directory.]]>
+      </doc>
+    </method>
+    <method name="delete"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Deletes the named map file.]]>
+      </doc>
+    </method>
+    <method name="fix" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="dir" type="org.apache.hadoop.fs.Path"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valueClass" type="java.lang.Class"/>
+      <param name="dryrun" type="boolean"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[This method attempts to fix a corrupt MapFile by re-creating its index.
+ @param fs filesystem
+ @param dir directory containing the MapFile data and index
+ @param keyClass key class (has to be a subclass of Writable)
+ @param valueClass value class (has to be a subclass of Writable)
+ @param dryrun do not perform any changes, just report what needs to be done
+ @return number of valid entries in this MapFile, or -1 if no fixing was needed
+ @throws Exception]]>
+      </doc>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <field name="INDEX_FILE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The name of the index file.]]>
+      </doc>
+    </field>
+    <field name="DATA_FILE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The name of the data file.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A file-based map from keys to values.
+ 
+ <p>A map is a directory containing two files, the <code>data</code> file,
+ containing all keys and values in the map, and a smaller <code>index</code>
+ file, containing a fraction of the keys.  The fraction is determined by
+ {@link Writer#getIndexInterval()}.
+
+ <p>The index file is read entirely into memory.  Thus key implementations
+ should try to keep themselves small.
+
+ <p>Map files are created by adding entries in-order.  To maintain a large
+ database, perform updates by copying the previous version of a database and
+ merging in a sorted change list, to create a new version of the database in
+ a new file.  Sorting large change lists can be done with {@link
+ SequenceFile.Sorter}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.MapFile -->
+  <!-- start class org.apache.hadoop.io.MapWritable -->
+  <class name="MapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Map"/>
+    <constructor name="MapWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor.]]>
+      </doc>
+    </constructor>
+    <constructor name="MapWritable" type="org.apache.hadoop.io.MapWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Copy constructor.
+ 
+ @param other the map to copy from]]>
+      </doc>
+    </constructor>
+    <method name="clear"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="containsKey" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Object"/>
+    </method>
+    <method name="containsValue" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="java.lang.Object"/>
+    </method>
+    <method name="entrySet" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="get" return="org.apache.hadoop.io.Writable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isEmpty" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="keySet" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="put" return="org.apache.hadoop.io.Writable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.io.Writable"/>
+      <param name="value" type="org.apache.hadoop.io.Writable"/>
+    </method>
+    <method name="putAll"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="t" type="java.util.Map"/>
+    </method>
+    <method name="remove" return="org.apache.hadoop.io.Writable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Object"/>
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="values" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A Writable Map.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.MapWritable -->
+  <!-- start class org.apache.hadoop.io.MD5Hash -->
+  <class name="MD5Hash" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="MD5Hash"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs an MD5Hash.]]>
+      </doc>
+    </constructor>
+    <constructor name="MD5Hash" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs an MD5Hash from a hex string.]]>
+      </doc>
+    </constructor>
+    <constructor name="MD5Hash" type="byte[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs an MD5Hash with a specified value.]]>
+      </doc>
+    </constructor>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="read" return="org.apache.hadoop.io.MD5Hash"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Constructs, reads and returns an instance.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+      <doc>
+      <![CDATA[Copy the contents of another instance into this instance.]]>
+      </doc>
+    </method>
+    <method name="getDigest" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the digest bytes.]]>
+      </doc>
+    </method>
+    <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="data" type="byte[]"/>
+      <doc>
+      <![CDATA[Construct a hash value for a byte array.]]>
+      </doc>
+    </method>
+    <method name="getDigester" return="java.security.MessageDigest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a thread local MD5 digester]]>
+      </doc>
+    </method>
+    <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct a hash value for the content from the InputStream.]]>
+      </doc>
+    </method>
+    <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="data" type="byte[]"/>
+      <param name="start" type="int"/>
+      <param name="len" type="int"/>
+      <doc>
+      <![CDATA[Construct a hash value for a byte array.]]>
+      </doc>
+    </method>
+    <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dataArr" type="byte[][]"/>
+      <param name="start" type="int"/>
+      <param name="len" type="int"/>
+      <doc>
+      <![CDATA[Construct a hash value for an array of byte array.]]>
+      </doc>
+    </method>
+    <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="string" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Construct a hash value for a String.]]>
+      </doc>
+    </method>
+    <method name="digest" return="org.apache.hadoop.io.MD5Hash"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="utf8" type="org.apache.hadoop.io.UTF8"/>
+      <doc>
+      <![CDATA[Construct a hash value for a String.]]>
+      </doc>
+    </method>
+    <method name="halfDigest" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a half-sized version of this MD5.  Fits in a long]]>
+      </doc>
+    </method>
+    <method name="quarterDigest" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return a 32-bit digest of the MD5.
+ @return the first 4 bytes of the md5]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Returns true iff <code>o</code> is an MD5Hash whose digest contains the
+ same values.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns a hash code value for this object.
+ Only uses the first 4 bytes, since md5s are evenly distributed.]]>
+      </doc>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="that" type="org.apache.hadoop.io.MD5Hash"/>
+      <doc>
+      <![CDATA[Compares this object with the specified object for order.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns a string representation of this object.]]>
+      </doc>
+    </method>
+    <method name="setDigest"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hex" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets the digest value from a hex string.]]>
+      </doc>
+    </method>
+    <field name="MD5_LEN" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A Writable for MD5 hash values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.MD5Hash -->
+  <!-- start class org.apache.hadoop.io.MultipleIOException -->
+  <class name="MultipleIOException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getExceptions" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the underlying exceptions]]>
+      </doc>
+    </method>
+    <method name="createIOException" return="java.io.IOException"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="exceptions" type="java.util.List"/>
+      <doc>
+      <![CDATA[A convenient method to create an {@link IOException}.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Encapsulate a list of {@link IOException} into an {@link IOException}]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.MultipleIOException -->
+  <!-- start class org.apache.hadoop.io.NullWritable -->
+  <class name="NullWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <method name="get" return="org.apache.hadoop.io.NullWritable"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the single instance of this class.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.io.NullWritable"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Singleton Writable with no data.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.NullWritable -->
+  <!-- start class org.apache.hadoop.io.ObjectWritable -->
+  <class name="ObjectWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="ObjectWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ObjectWritable" type="java.lang.Object"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ObjectWritable" type="java.lang.Class, java.lang.Object"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="get" return="java.lang.Object"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the instance, or null if none.]]>
+      </doc>
+    </method>
+    <method name="getDeclaredClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the class this is meant to be.]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="instance" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Reset the instance.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeObject"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="instance" type="java.lang.Object"/>
+      <param name="declaredClass" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+      </doc>
+    </method>
+    <method name="writeObject"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="instance" type="java.lang.Object"/>
+      <param name="declaredClass" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="allowCompactArrays" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.  
+ 
+ @param allowCompactArrays - set true for RPC and internal or intra-cluster
+ usages.  Set false for inter-cluster, File, and other persisted output 
+ usages, to preserve the ability to interchange files with other clusters 
+ that may not be running the same version of software.  Sometime in ~2013 
+ we can consider removing this parameter and always using the compact format.]]>
+      </doc>
+    </method>
+    <method name="readObject" return="java.lang.Object"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+      </doc>
+    </method>
+    <method name="readObject" return="java.lang.Object"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <param name="objectWritable" type="org.apache.hadoop.io.ObjectWritable"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of
+ the preceding.]]>
+      </doc>
+    </method>
+    <method name="loadClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="className" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Find and load the class with given name <tt>className</tt> by first finding
+ it in the specified <tt>conf</tt>. If the specified <tt>conf</tt> is null,
+ try load it directly.]]>
+      </doc>
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A polymorphic Writable that writes an instance with it's class name.
+ Handles arrays, strings and primitive types without a Writable wrapper.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.ObjectWritable -->
+  <!-- start interface org.apache.hadoop.io.RawComparator -->
+  <interface name="RawComparator"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Comparator"/>
+    <method name="compare" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b1" type="byte[]"/>
+      <param name="s1" type="int"/>
+      <param name="l1" type="int"/>
+      <param name="b2" type="byte[]"/>
+      <param name="s2" type="int"/>
+      <param name="l2" type="int"/>
+      <doc>
+      <![CDATA[Compare two objects in binary.
+ b1[s1:l1] is the first object, and b2[s2:l2] is the second object.
+ 
+ @param b1 The first byte array.
+ @param s1 The position index in b1. The object under comparison's starting index.
+ @param l1 The length of the object in b1.
+ @param b2 The second byte array.
+ @param s2 The position index in b2. The object under comparison's starting index.
+ @param l2 The length of the object under comparison in b2.
+ @return An integer result of the comparison.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ A {@link Comparator} that operates directly on byte representations of
+ objects.
+ </p>
+ @param <T>
+ @see DeserializerComparator]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.RawComparator -->
+  <!-- start class org.apache.hadoop.io.SequenceFile -->
+  <class name="SequenceFile" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getDefaultCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the compression type for the reduce outputs
+ @param job the job config to look in
+ @return the kind of compression to use]]>
+      </doc>
+    </method>
+    <method name="setDefaultCompressionType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="val" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <doc>
+      <![CDATA[Set the default compression type for sequence files.
+ @param job the configuration to modify
+ @param val the new compression type (none, block, record)]]>
+      </doc>
+    </method>
+    <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="opts" type="org.apache.hadoop.io.SequenceFile.Writer.Option[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a new Writer with the given options.
+ @param conf the configuration to use
+ @param opts the options to create the file with
+ @return a new Writer
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="name" type="org.apache.hadoop.fs.Path"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valClass" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem. 
+ @param conf The configuration.
+ @param name The name of the file. 
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException
+ @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.]]>
+      </doc>
+    </method>
+    <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="name" type="org.apache.hadoop.fs.Path"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valClass" type="java.lang.Class"/>
+      <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem. 
+ @param conf The configuration.
+ @param name The name of the file. 
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException
+ @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.]]>
+      </doc>
+    </method>
+    <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="name" type="org.apache.hadoop.fs.Path"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valClass" type="java.lang.Class"/>
+      <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem. 
+ @param conf The configuration.
+ @param name The name of the file. 
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException
+ @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.]]>
+      </doc>
+    </method>
+    <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="name" type="org.apache.hadoop.fs.Path"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valClass" type="java.lang.Class"/>
+      <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem. 
+ @param conf The configuration.
+ @param name The name of the file. 
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException
+ @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.]]>
+      </doc>
+    </method>
+    <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="name" type="org.apache.hadoop.fs.Path"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valClass" type="java.lang.Class"/>
+      <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem. 
+ @param conf The configuration.
+ @param name The name of the file. 
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException
+ @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.]]>
+      </doc>
+    </method>
+    <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="name" type="org.apache.hadoop.fs.Path"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valClass" type="java.lang.Class"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param bufferSize buffer size for the underlaying outputstream.
+ @param replication replication factor for the file.
+ @param blockSize block size for the file.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException
+ @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.]]>
+      </doc>
+    </method>
+    <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="name" type="org.apache.hadoop.fs.Path"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valClass" type="java.lang.Class"/>
+      <param name="bufferSize" type="int"/>
+      <param name="replication" type="short"/>
+      <param name="blockSize" type="long"/>
+      <param name="createParent" type="boolean"/>
+      <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+      <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param bufferSize buffer size for the underlaying outputstream.
+ @param replication replication factor for the file.
+ @param blockSize block size for the file.
+ @param createParent create parent directory if non-existent
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fc" type="org.apache.hadoop.fs.FileContext"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="name" type="org.apache.hadoop.fs.Path"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valClass" type="java.lang.Class"/>
+      <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+      <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+      <param name="createFlag" type="java.util.EnumSet"/>
+      <param name="opts" type="org.apache.hadoop.fs.Options.CreateOpts[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fc The context for the specified file.
+ @param conf The configuration.
+ @param name The name of the file.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param metadata The metadata of the file.
+ @param createFlag gives the semantics of create: overwrite, append etc.
+ @param opts file creation options; see {@link CreateOpts}.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="name" type="org.apache.hadoop.fs.Path"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valClass" type="java.lang.Class"/>
+      <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct the preferred type of SequenceFile Writer.
+ @param fs The configured filesystem. 
+ @param conf The configuration.
+ @param name The name of the file. 
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param progress The Progressable object to track progress.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException
+ @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.]]>
+      </doc>
+    </method>
+    <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valClass" type="java.lang.Class"/>
+      <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+      <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @param metadata The metadata of the file.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException
+ @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.]]>
+      </doc>
+    </method>
+    <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valClass" type="java.lang.Class"/>
+      <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer.
+ @param conf The configuration.
+ @param out The stream on top which the writer is to be constructed.
+ @param keyClass The 'key' type.
+ @param valClass The 'value' type.
+ @param compressionType The compression type.
+ @param codec The compression codec.
+ @return Returns the handle to the constructed SequenceFile Writer.
+ @throws IOException
+ @deprecated Use {@link #createWriter(Configuration, Writer.Option...)}
+     instead.]]>
+      </doc>
+    </method>
+    <field name="SYNC_INTERVAL" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of bytes between sync points. 100 KB, default.
+ Computed as 5 KB * 20 = 100 KB]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[<code>SequenceFile</code>s are flat files consisting of binary key/value 
+ pairs.
+ 
+ <p><code>SequenceFile</code> provides {@link SequenceFile.Writer},
+ {@link SequenceFile.Reader} and {@link Sorter} classes for writing,
+ reading and sorting respectively.</p>
+ 
+ There are three <code>SequenceFile</code> <code>Writer</code>s based on the 
+ {@link CompressionType} used to compress key/value pairs:
+ <ol>
+   <li>
+   <code>Writer</code> : Uncompressed records.
+   </li>
+   <li>
+   <code>RecordCompressWriter</code> : Record-compressed files, only compress 
+                                       values.
+   </li>
+   <li>
+   <code>BlockCompressWriter</code> : Block-compressed files, both keys &amp;
+                                      values are collected in 'blocks' 
+                                      separately and compressed. The size of 
+                                      the 'block' is configurable.
+ </ol>
+ 
+ <p>The actual compression algorithm used to compress key and/or values can be
+ specified by using the appropriate {@link CompressionCodec}.</p>
+ 
+ <p>The recommended way is to use the static <tt>createWriter</tt> methods
+ provided by the <code>SequenceFile</code> to chose the preferred format.</p>
+
+ <p>The {@link SequenceFile.Reader} acts as the bridge and can read any of the
+ above <code>SequenceFile</code> formats.</p>
+
+ <h3 id="Formats">SequenceFile Formats</h3>
+ 
+ <p>Essentially there are 3 different formats for <code>SequenceFile</code>s
+ depending on the <code>CompressionType</code> specified. All of them share a
+ <a href="#Header">common header</a> described below.
+ 
+ <h4 id="Header">SequenceFile Header</h4>
+ <ul>
+   <li>
+   version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual 
+             version number (e.g. SEQ4 or SEQ6)
+   </li>
+   <li>
+   keyClassName -key class
+   </li>
+   <li>
+   valueClassName - value class
+   </li>
+   <li>
+   compression - A boolean which specifies if compression is turned on for 
+                 keys/values in this file.
+   </li>
+   <li>
+   blockCompression - A boolean which specifies if block-compression is 
+                      turned on for keys/values in this file.
+   </li>
+   <li>
+   compression codec - <code>CompressionCodec</code> class which is used for  
+                       compression of keys and/or values (if compression is 
+                       enabled).
+   </li>
+   <li>
+   metadata - {@link Metadata} for this file.
+   </li>
+   <li>
+   sync - A sync marker to denote end of the header.
+   </li>
+ </ul>
+ 
+ <h5>Uncompressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+   <ul>
+     <li>Record length</li>
+     <li>Key length</li>
+     <li>Key</li>
+     <li>Value</li>
+   </ul>
+ </li>
+ <li>
+ A sync-marker every few <code>100</code> kilobytes or so.
+ </li>
+ </ul>
+
+ <h5>Record-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record
+   <ul>
+     <li>Record length</li>
+     <li>Key length</li>
+     <li>Key</li>
+     <li><i>Compressed</i> Value</li>
+   </ul>
+ </li>
+ <li>
+ A sync-marker every few <code>100</code> kilobytes or so.
+ </li>
+ </ul>
+ 
+ <h5>Block-Compressed SequenceFile Format</h5>
+ <ul>
+ <li>
+ <a href="#Header">Header</a>
+ </li>
+ <li>
+ Record <i>Block</i>
+   <ul>
+     <li>Uncompressed number of records in the block</li>
+     <li>Compressed key-lengths block-size</li>
+     <li>Compressed key-lengths block</li>
+     <li>Compressed keys block-size</li>
+     <li>Compressed keys block</li>
+     <li>Compressed value-lengths block-size</li>
+     <li>Compressed value-lengths block</li>
+     <li>Compressed values block-size</li>
+     <li>Compressed values block</li>
+   </ul>
+ </li>
+ <li>
+ A sync-marker every block.
+ </li>
+ </ul>
+ 
+ <p>The compressed blocks of key lengths and value lengths consist of the 
+ actual lengths of individual keys/values encoded in ZeroCompressedInteger 
+ format.</p>
+ 
+ @see CompressionCodec]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.SequenceFile -->
+  <!-- start class org.apache.hadoop.io.SetFile -->
+  <class name="SetFile" extends="org.apache.hadoop.io.MapFile"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SetFile"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[A file-based set of keys.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.SetFile -->
+  <!-- start class org.apache.hadoop.io.ShortWritable -->
+  <class name="ShortWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="ShortWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ShortWritable" type="short"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="short"/>
+      <doc>
+      <![CDATA[Set the value of this ShortWritable.]]>
+      </doc>
+    </method>
+    <method name="get" return="short"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the value of this ShortWritable.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[read the short value]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[write short value]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Returns true iff <code>o</code> is a ShortWritable with the same value.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[hash code]]>
+      </doc>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.io.ShortWritable"/>
+      <doc>
+      <![CDATA[Compares two ShortWritable.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Short values in string format]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A WritableComparable for shorts.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.ShortWritable -->
+  <!-- start class org.apache.hadoop.io.SortedMapWritable -->
+  <class name="SortedMapWritable" extends="org.apache.hadoop.io.AbstractMapWritable"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.SortedMap"/>
+    <constructor name="SortedMapWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[default constructor.]]>
+      </doc>
+    </constructor>
+    <constructor name="SortedMapWritable" type="org.apache.hadoop.io.SortedMapWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Copy constructor.
+ 
+ @param other the map to copy from]]>
+      </doc>
+    </constructor>
+    <method name="comparator" return="java.util.Comparator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="firstKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="headMap" return="java.util.SortedMap"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="toKey" type="K"/>
+    </method>
+    <method name="lastKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="subMap" return="java.util.SortedMap"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fromKey" type="K"/>
+      <param name="toKey" type="K"/>
+    </method>
+    <method name="tailMap" return="java.util.SortedMap"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fromKey" type="K"/>
+    </method>
+    <method name="clear"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="containsKey" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Object"/>
+    </method>
+    <method name="containsValue" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="java.lang.Object"/>
+    </method>
+    <method name="entrySet" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="get" return="org.apache.hadoop.io.Writable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Object"/>
+    </method>
+    <method name="isEmpty" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="keySet" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="put" return="org.apache.hadoop.io.Writable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="org.apache.hadoop.io.Writable"/>
+    </method>
+    <method name="putAll"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="t" type="java.util.Map"/>
+    </method>
+    <method name="remove" return="org.apache.hadoop.io.Writable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Object"/>
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="values" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A Writable SortedMap.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.SortedMapWritable -->
+  <!-- start interface org.apache.hadoop.io.Stringifier -->
+  <interface name="Stringifier"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Closeable"/>
+    <method name="toString" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="T"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Converts the object to a string representation
+ @param obj the object to convert
+ @return the string representation of the object
+ @throws IOException if the object cannot be converted]]>
+      </doc>
+    </method>
+    <method name="fromString" return="T"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="str" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Restores the object from its string representation.
+ @param str the string representation of the object
+ @return restored object
+ @throws IOException if the object cannot be restored]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Closes this object. 
+ @throws IOException if an I/O error occurs]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Stringifier interface offers two methods to convert an object 
+ to a string representation and restore the object given its 
+ string representation.
+ @param <T> the class of the objects to stringify]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.Stringifier -->
+  <!-- start class org.apache.hadoop.io.Text -->
+  <class name="Text" extends="org.apache.hadoop.io.BinaryComparable"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="Text"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="Text" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct from a string.]]>
+      </doc>
+    </constructor>
+    <constructor name="Text" type="org.apache.hadoop.io.Text"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct from another text.]]>
+      </doc>
+    </constructor>
+    <constructor name="Text" type="byte[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct from a byte array.]]>
+      </doc>
+    </constructor>
+    <method name="copyBytes" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a copy of the bytes that is exactly the length of the data.
+ See {@link #getBytes()} for faster access to the underlying array.]]>
+      </doc>
+    </method>
+    <method name="getBytes" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the raw bytes; however, only data up to {@link #getLength()} is
+ valid. Please use {@link #copyBytes()} if you
+ need the returned array to be precisely the length of the data.]]>
+      </doc>
+    </method>
+    <method name="getLength" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the number of bytes in the byte array]]>
+      </doc>
+    </method>
+    <method name="charAt" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="position" type="int"/>
+      <doc>
+      <![CDATA[Returns the Unicode Scalar Value (32-bit integer value)
+ for the character at <code>position</code>. Note that this
+ method avoids using the converter or doing String instantiation
+ @return the Unicode scalar value at position or -1
+          if the position is invalid or points to a
+          trailing byte]]>
+      </doc>
+    </method>
+    <method name="find" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="what" type="java.lang.String"/>
+    </method>
+    <method name="find" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="what" type="java.lang.String"/>
+      <param name="start" type="int"/>
+      <doc>
+      <![CDATA[Finds any occurrence of <code>what</code> in the backing
+ buffer, starting as position <code>start</code>. The starting
+ position is measured in bytes and the return value is in
+ terms of byte position in the buffer. The backing buffer is
+ not converted to a string for this operation.
+ @return byte position of the first occurrence of the search
+         string in the UTF-8 buffer or -1 if not found]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="string" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set to contain the contents of a string.]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="utf8" type="byte[]"/>
+      <doc>
+      <![CDATA[Set to a utf8 byte array]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.io.Text"/>
+      <doc>
+      <![CDATA[copy a text.]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="utf8" type="byte[]"/>
+      <param name="start" type="int"/>
+      <param name="len" type="int"/>
+      <doc>
+      <![CDATA[Set the Text to range of bytes
+ @param utf8 the data to copy from
+ @param start the first position of the new string
+ @param len the number of bytes of the new string]]>
+      </doc>
+    </method>
+    <method name="append"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="utf8" type="byte[]"/>
+      <param name="start" type="int"/>
+      <param name="len" type="int"/>
+      <doc>
+      <![CDATA[Append a range of bytes to the end of the given text
+ @param utf8 the data to copy from
+ @param start the first position to append from utf8
+ @param len the number of bytes to append]]>
+      </doc>
+    </method>
+    <method name="clear"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Clear the string to empty.
+
+ <em>Note</em>: For performance reasons, this call does not clear the
+ underlying byte array that is retrievable via {@link #getBytes()}.
+ In order to free the byte-array memory, call {@link #set(byte[])}
+ with an empty byte array (For example, <code>new byte[0]</code>).]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Convert text back to string
+ @see java.lang.Object#toString()]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[deserialize]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <param name="maxLength" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="skip"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Skips over one Text in the input.]]>
+      </doc>
+    </method>
+    <method name="readWithKnownLength"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read a Text object whose length is already known.
+ This allows creating Text from a stream which uses a different serialization
+ format.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[serialize
+ write this object to out
+ length uses zero-compressed encoding
+ @see Writable#write(DataOutput)]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="maxLength" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Returns true iff <code>o</code> is a Text with the same contents.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="decode" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="utf8" type="byte[]"/>
+      <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+      <doc>
+      <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If the input is malformed,
+ replace by a default value.]]>
+      </doc>
+    </method>
+    <method name="decode" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="utf8" type="byte[]"/>
+      <param name="start" type="int"/>
+      <param name="length" type="int"/>
+      <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+    </method>
+    <method name="decode" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="utf8" type="byte[]"/>
+      <param name="start" type="int"/>
+      <param name="length" type="int"/>
+      <param name="replace" type="boolean"/>
+      <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+      <doc>
+      <![CDATA[Converts the provided byte array to a String using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.]]>
+      </doc>
+    </method>
+    <method name="encode" return="java.nio.ByteBuffer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="string" type="java.lang.String"/>
+      <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+      <doc>
+      <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If the input is malformed,
+ invalid chars are replaced by a default value.
+ @return ByteBuffer: bytes stores at ByteBuffer.array() 
+                     and length is ByteBuffer.limit()]]>
+      </doc>
+    </method>
+    <method name="encode" return="java.nio.ByteBuffer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="string" type="java.lang.String"/>
+      <param name="replace" type="boolean"/>
+      <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/>
+      <doc>
+      <![CDATA[Converts the provided String to bytes using the
+ UTF-8 encoding. If <code>replace</code> is true, then
+ malformed input is replaced with the
+ substitution character, which is U+FFFD. Otherwise the
+ method throws a MalformedInputException.
+ @return ByteBuffer: bytes stores at ByteBuffer.array() 
+                     and length is ByteBuffer.limit()]]>
+      </doc>
+    </method>
+    <method name="readString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read a UTF8 encoded string from in]]>
+      </doc>
+    </method>
+    <method name="readString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <param name="maxLength" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read a UTF8 encoded string with a maximum size]]>
+      </doc>
+    </method>
+    <method name="writeString" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="s" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write a UTF8 encoded string to out]]>
+      </doc>
+    </method>
+    <method name="writeString" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="s" type="java.lang.String"/>
+      <param name="maxLength" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write a UTF8 encoded string with a maximum size to out]]>
+      </doc>
+    </method>
+    <method name="validateUTF8"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="utf8" type="byte[]"/>
+      <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+      <doc>
+      <![CDATA[Check if a byte array contains valid utf-8
+ @param utf8 byte array
+ @throws MalformedInputException if the byte array contains invalid utf-8]]>
+      </doc>
+    </method>
+    <method name="validateUTF8"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="utf8" type="byte[]"/>
+      <param name="start" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/>
+      <doc>
+      <![CDATA[Check to see if a byte array is valid utf-8
+ @param utf8 the array of bytes
+ @param start the offset of the first byte in the array
+ @param len the length of the byte sequence
+ @throws MalformedInputException if the byte array contains invalid bytes]]>
+      </doc>
+    </method>
+    <method name="bytesToCodePoint" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="java.nio.ByteBuffer"/>
+      <doc>
+      <![CDATA[Returns the next code point at the current position in
+ the buffer. The buffer's position will be incremented.
+ Any mark set on this buffer will be changed by this method!]]>
+      </doc>
+    </method>
+    <method name="utf8Length" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="string" type="java.lang.String"/>
+      <doc>
+      <![CDATA[For the given string, returns the number of UTF-8 bytes
+ required to encode the string.
+ @param string text to encode
+ @return number of UTF-8 bytes required to encode]]>
+      </doc>
+    </method>
+    <field name="DEFAULT_MAX_LEN" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class stores text using standard UTF8 encoding.  It provides methods
+ to serialize, deserialize, and compare texts at byte level.  The type of
+ length is integer and is serialized using zero-compressed format.  <p>In
+ addition, it provides methods for string traversal without converting the
+ byte array to a string.  <p>Also includes utilities for
+ serializing/deserialing a string, coding/decoding a string, checking if a
+ byte array contains valid UTF8 code, calculating the length of an encoded
+ string.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.Text -->
+  <!-- start class org.apache.hadoop.io.TwoDArrayWritable -->
+  <class name="TwoDArrayWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="TwoDArrayWritable" type="java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="TwoDArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[][]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="toArray" return="java.lang.Object"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="values" type="org.apache.hadoop.io.Writable[][]"/>
+    </method>
+    <method name="get" return="org.apache.hadoop.io.Writable[][]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A Writable for 2D arrays containing a matrix of instances of a class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.TwoDArrayWritable -->
+  <!-- start class org.apache.hadoop.io.VersionedWritable -->
+  <class name="VersionedWritable" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="VersionedWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getVersion" return="byte"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the version number of the current implementation.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A base class for Writables that provides version checking.
+
+ <p>This is useful when a class may evolve, so that instances written by the
+ old version of the class may still be processed by the new version.  To
+ handle this situation, {@link #readFields(DataInput)}
+ implementations should catch {@link VersionMismatchException}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.VersionedWritable -->
+  <!-- start class org.apache.hadoop.io.VersionMismatchException -->
+  <class name="VersionMismatchException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="VersionMismatchException" type="byte, byte"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns a string representation of this object.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Thrown by {@link VersionedWritable#readFields(DataInput)} when the
+ version of an object being read does not match the current implementation
+ version as returned by {@link VersionedWritable#getVersion()}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.VersionMismatchException -->
+  <!-- start class org.apache.hadoop.io.VIntWritable -->
+  <class name="VIntWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="VIntWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="VIntWritable" type="int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="int"/>
+      <doc>
+      <![CDATA[Set the value of this VIntWritable.]]>
+      </doc>
+    </method>
+    <method name="get" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the value of this VIntWritable.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Returns true iff <code>o</code> is a VIntWritable with the same value.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.io.VIntWritable"/>
+      <doc>
+      <![CDATA[Compares two VIntWritables.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A WritableComparable for integer values stored in variable-length format.
+ Such values take between one and five bytes.  Smaller values take fewer bytes.
+ 
+ @see org.apache.hadoop.io.WritableUtils#readVInt(DataInput)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.VIntWritable -->
+  <!-- start class org.apache.hadoop.io.VLongWritable -->
+  <class name="VLongWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="VLongWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="VLongWritable" type="long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Set the value of this LongWritable.]]>
+      </doc>
+    </method>
+    <method name="get" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the value of this LongWritable.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Returns true iff <code>o</code> is a VLongWritable with the same value.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.io.VLongWritable"/>
+      <doc>
+      <![CDATA[Compares two VLongWritables.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A WritableComparable for longs in a variable-length format. Such values take
+  between one and five bytes.  Smaller values take fewer bytes.
+  
+  @see org.apache.hadoop.io.WritableUtils#readVLong(DataInput)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.VLongWritable -->
+  <!-- start interface org.apache.hadoop.io.Writable -->
+  <interface name="Writable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="write"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Serialize the fields of this object to <code>out</code>.
+ 
+ @param out <code>DataOuput</code> to serialize this object into.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Deserialize the fields of this object from <code>in</code>.  
+ 
+ <p>For efficiency, implementations should attempt to re-use storage in the 
+ existing object where possible.</p>
+ 
+ @param in <code>DataInput</code> to deseriablize this object from.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A serializable object which implements a simple, efficient, serialization 
+ protocol, based on {@link DataInput} and {@link DataOutput}.
+
+ <p>Any <code>key</code> or <code>value</code> type in the Hadoop Map-Reduce
+ framework implements this interface.</p>
+ 
+ <p>Implementations typically implement a static <code>read(DataInput)</code>
+ method which constructs a new instance, calls {@link #readFields(DataInput)} 
+ and returns the instance.</p>
+ 
+ <p>Example:</p>
+ <blockquote><pre>
+     public class MyWritable implements Writable {
+       // Some data
+       private int counter;
+       private long timestamp;
+
+       // Default constructor to allow (de)serialization
+       MyWritable() { }
+
+       public void write(DataOutput out) throws IOException {
+         out.writeInt(counter);
+         out.writeLong(timestamp);
+       }
+
+       public void readFields(DataInput in) throws IOException {
+         counter = in.readInt();
+         timestamp = in.readLong();
+       }
+
+       public static MyWritable read(DataInput in) throws IOException {
+         MyWritable w = new MyWritable();
+         w.readFields(in);
+         return w;
+       }
+     }
+ </pre></blockquote>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.Writable -->
+  <!-- start interface org.apache.hadoop.io.WritableComparable -->
+  <interface name="WritableComparable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <implements name="java.lang.Comparable"/>
+    <doc>
+    <![CDATA[A {@link Writable} which is also {@link Comparable}. 
+
+ <p><code>WritableComparable</code>s can be compared to each other, typically 
+ via <code>Comparator</code>s. Any type which is to be used as a 
+ <code>key</code> in the Hadoop Map-Reduce framework should implement this
+ interface.</p>
+
+ <p>Note that <code>hashCode()</code> is frequently used in Hadoop to partition
+ keys. It's important that your implementation of hashCode() returns the same 
+ result across different instances of the JVM. Note also that the default 
+ <code>hashCode()</code> implementation in <code>Object</code> does <b>not</b>
+ satisfy this property.</p>
+  
+ <p>Example:</p>
+ <blockquote><pre>
+     public class MyWritableComparable implements
+      WritableComparable{@literal <MyWritableComparable>} {
+       // Some data
+       private int counter;
+       private long timestamp;
+       
+       public void write(DataOutput out) throws IOException {
+         out.writeInt(counter);
+         out.writeLong(timestamp);
+       }
+       
+       public void readFields(DataInput in) throws IOException {
+         counter = in.readInt();
+         timestamp = in.readLong();
+       }
+       
+       public int compareTo(MyWritableComparable o) {
+         int thisValue = this.value;
+         int thatValue = o.value;
+         return (thisValue &lt; thatValue ? -1 : (thisValue==thatValue ? 0 : 1));
+       }
+
+       public int hashCode() {
+         final int prime = 31;
+         int result = 1;
+         result = prime * result + counter;
+         result = prime * result + (int) (timestamp ^ (timestamp &gt;&gt;&gt; 32));
+         return result
+       }
+     }
+ </pre></blockquote>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.WritableComparable -->
+  <!-- start class org.apache.hadoop.io.WritableComparator -->
+  <class name="WritableComparator" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.RawComparator"/>
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="WritableComparator"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="WritableComparator" type="java.lang.Class"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct for a {@link WritableComparable} implementation.]]>
+      </doc>
+    </constructor>
+    <constructor name="WritableComparator" type="java.lang.Class, boolean"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="WritableComparator" type="java.lang.Class, org.apache.hadoop.conf.Configuration, boolean"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="get" return="org.apache.hadoop.io.WritableComparator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="c" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[For backwards compatibility.]]>
+      </doc>
+    </method>
+    <method name="get" return="org.apache.hadoop.io.WritableComparator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="c" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get a comparator for a {@link WritableComparable} implementation.]]>
+      </doc>
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="define"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="c" type="java.lang.Class"/>
+      <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/>
+      <doc>
+      <![CDATA[Register an optimized comparator for a {@link WritableComparable}
+ implementation. Comparators registered with this method must be
+ thread-safe.]]>
+      </doc>
+    </method>
+    <method name="getKeyClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the WritableComparable implementation class.]]>
+      </doc>
+    </method>
+    <method name="newKey" return="org.apache.hadoop.io.WritableComparable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a new {@link WritableComparable} instance.]]>
+      </doc>
+    </method>
+    <method name="compare" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b1" type="byte[]"/>
+      <param name="s1" type="int"/>
+      <param name="l1" type="int"/>
+      <param name="b2" type="byte[]"/>
+      <param name="s2" type="int"/>
+      <param name="l2" type="int"/>
+      <doc>
+      <![CDATA[Optimization hook.  Override this to make SequenceFile.Sorter's scream.
+
+ <p>The default implementation reads the data into two {@link
+ WritableComparable}s (using {@link
+ Writable#readFields(DataInput)}, then calls {@link
+ #compare(WritableComparable,WritableComparable)}.]]>
+      </doc>
+    </method>
+    <method name="compare" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="a" type="org.apache.hadoop.io.WritableComparable"/>
+      <param name="b" type="org.apache.hadoop.io.WritableComparable"/>
+      <doc>
+      <![CDATA[Compare two WritableComparables.
+
+ <p> The default implementation uses the natural ordering, calling {@link
+ Comparable#compareTo(Object)}.]]>
+      </doc>
+    </method>
+    <method name="compare" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="a" type="java.lang.Object"/>
+      <param name="b" type="java.lang.Object"/>
+    </method>
+    <method name="compareBytes" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b1" type="byte[]"/>
+      <param name="s1" type="int"/>
+      <param name="l1" type="int"/>
+      <param name="b2" type="byte[]"/>
+      <param name="s2" type="int"/>
+      <param name="l2" type="int"/>
+      <doc>
+      <![CDATA[Lexicographic order of binary data.]]>
+      </doc>
+    </method>
+    <method name="hashBytes" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="byte[]"/>
+      <param name="offset" type="int"/>
+      <param name="length" type="int"/>
+      <doc>
+      <![CDATA[Compute hash for binary data.]]>
+      </doc>
+    </method>
+    <method name="hashBytes" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="byte[]"/>
+      <param name="length" type="int"/>
+      <doc>
+      <![CDATA[Compute hash for binary data.]]>
+      </doc>
+    </method>
+    <method name="readUnsignedShort" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="byte[]"/>
+      <param name="start" type="int"/>
+      <doc>
+      <![CDATA[Parse an unsigned short from a byte array.]]>
+      </doc>
+    </method>
+    <method name="readInt" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="byte[]"/>
+      <param name="start" type="int"/>
+      <doc>
+      <![CDATA[Parse an integer from a byte array.]]>
+      </doc>
+    </method>
+    <method name="readFloat" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="byte[]"/>
+      <param name="start" type="int"/>
+      <doc>
+      <![CDATA[Parse a float from a byte array.]]>
+      </doc>
+    </method>
+    <method name="readLong" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="byte[]"/>
+      <param name="start" type="int"/>
+      <doc>
+      <![CDATA[Parse a long from a byte array.]]>
+      </doc>
+    </method>
+    <method name="readDouble" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="byte[]"/>
+      <param name="start" type="int"/>
+      <doc>
+      <![CDATA[Parse a double from a byte array.]]>
+      </doc>
+    </method>
+    <method name="readVLong" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="byte[]"/>
+      <param name="start" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it.
+ @param bytes byte array with decode long
+ @param start starting index
+ @throws java.io.IOException 
+ @return deserialized long]]>
+      </doc>
+    </method>
+    <method name="readVInt" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="byte[]"/>
+      <param name="start" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it.
+ @param bytes byte array with the encoded integer
+ @param start start index
+ @throws java.io.IOException 
+ @return deserialized integer]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A Comparator for {@link WritableComparable}s.
+
+ <p>This base implementation uses the natural ordering.  To define alternate
+ orderings, override {@link #compare(WritableComparable,WritableComparable)}.
+
+ <p>One may optimize compare-intensive operations by overriding
+ {@link #compare(byte[],int,int,byte[],int,int)}.  Static utility methods are
+ provided to assist in optimized implementations of this method.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.WritableComparator -->
+  <!-- start class org.apache.hadoop.io.WritableFactories -->
+  <class name="WritableFactories" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="setFactory"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="c" type="java.lang.Class"/>
+      <param name="factory" type="org.apache.hadoop.io.WritableFactory"/>
+      <doc>
+      <![CDATA[Define a factory for a class.]]>
+      </doc>
+    </method>
+    <method name="getFactory" return="org.apache.hadoop.io.WritableFactory"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="c" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Define a factory for a class.]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.io.Writable"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="c" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Create a new instance of a class with a defined factory.]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.io.Writable"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="c" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Create a new instance of a class with a defined factory.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Factories for non-public writables.  Defining a factory permits {@link
+ ObjectWritable} to be able to construct instances of non-public classes.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.WritableFactories -->
+  <!-- start interface org.apache.hadoop.io.WritableFactory -->
+  <interface name="WritableFactory"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="newInstance" return="org.apache.hadoop.io.Writable"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return a new instance.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A factory for a class of Writable.
+ @see WritableFactories]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.WritableFactory -->
+  <!-- start class org.apache.hadoop.io.WritableUtils -->
+  <class name="WritableUtils" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="WritableUtils"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="readCompressedByteArray" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="skipCompressedByteArray"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeCompressedByteArray" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="bytes" type="byte[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readCompressedString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeCompressedString" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="s" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeString"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="s" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeStringArray"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="s" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeCompressedStringArray"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="s" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readStringArray" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readCompressedStringArray" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="displayByteArray"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="record" type="byte[]"/>
+    </method>
+    <method name="clone" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="orig" type="T"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Make a copy of a writable object using serialization to a buffer.
+ @param orig The object to copy
+ @return The copied object]]>
+      </doc>
+    </method>
+    <method name="cloneInto"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="use ReflectionUtils.cloneInto instead.">
+      <param name="dst" type="org.apache.hadoop.io.Writable"/>
+      <param name="src" type="org.apache.hadoop.io.Writable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Make a copy of the writable object using serialization to a buffer
+ @param dst the object to copy from
+ @param src the object to copy into, which is destroyed
+ @throws IOException
+ @deprecated use ReflectionUtils.cloneInto instead.]]>
+      </doc>
+    </method>
+    <method name="writeVInt"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="stream" type="java.io.DataOutput"/>
+      <param name="i" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Serializes an integer to a binary stream with zero-compressed encoding.
+ For -112 {@literal <=} i {@literal <=} 127, only one byte is used with the
+ actual value.
+ For other values of i, the first byte value indicates whether the
+ integer is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -116, the following integer
+ is positive, with number of bytes that follow are -(v+112).
+ If the first byte value v is between -121 and -124, the following integer
+ is negative, with number of bytes that follow are -(v+120). Bytes are
+ stored in the high-non-zero-byte-first order.
+
+ @param stream Binary output stream
+ @param i Integer to be serialized
+ @throws java.io.IOException]]>
+      </doc>
+    </method>
+    <method name="writeVLong"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="stream" type="java.io.DataOutput"/>
+      <param name="i" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Serializes a long to a binary stream with zero-compressed encoding.
+ For -112 {@literal <=} i {@literal <=} 127, only one byte is used with the
+ actual value.
+ For other values of i, the first byte value indicates whether the
+ long is positive or negative, and the number of bytes that follow.
+ If the first byte value v is between -113 and -120, the following long
+ is positive, with number of bytes that follow are -(v+112).
+ If the first byte value v is between -121 and -128, the following long
+ is negative, with number of bytes that follow are -(v+120). Bytes are
+ stored in the high-non-zero-byte-first order.
+ 
+ @param stream Binary output stream
+ @param i Long to be serialized
+ @throws java.io.IOException]]>
+      </doc>
+    </method>
+    <method name="readVLong" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="stream" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reads a zero-compressed encoded long from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException 
+ @return deserialized long from stream.]]>
+      </doc>
+    </method>
+    <method name="readVInt" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="stream" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reads a zero-compressed encoded integer from input stream and returns it.
+ @param stream Binary input stream
+ @throws java.io.IOException 
+ @return deserialized integer from stream.]]>
+      </doc>
+    </method>
+    <method name="readVIntInRange" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="stream" type="java.io.DataInput"/>
+      <param name="lower" type="int"/>
+      <param name="upper" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reads an integer from the input stream and returns it.
+
+ This function validates that the integer is between [lower, upper],
+ inclusive.
+
+ @param stream Binary input stream
+ @throws java.io.IOException
+ @return deserialized integer from stream]]>
+      </doc>
+    </method>
+    <method name="isNegativeVInt" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="byte"/>
+      <doc>
+      <![CDATA[Given the first byte of a vint/vlong, determine the sign
+ @param value the first byte
+ @return is the value negative]]>
+      </doc>
+    </method>
+    <method name="decodeVIntSize" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="byte"/>
+      <doc>
+      <![CDATA[Parse the first byte of a vint/vlong to determine the number of bytes
+ @param value the first byte of the vint/vlong
+ @return the total number of bytes (1 to 9)]]>
+      </doc>
+    </method>
+    <method name="getVIntSize" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="long"/>
+      <doc>
+      <![CDATA[Get the encoded length if an integer is stored in a variable-length format
+ @return the encoded length]]>
+      </doc>
+    </method>
+    <method name="readEnum" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <param name="enumType" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read an Enum value from DataInput, Enums are read and written 
+ using String values. 
+ @param <T> Enum type
+ @param in DataInput to read from 
+ @param enumType Class type of Enum
+ @return Enum represented by String read from DataInput
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="writeEnum"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="enumVal" type="java.lang.Enum"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[writes String value of enum to DataOutput. 
+ @param out Dataoutput stream
+ @param enumVal enum value
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="skipFully"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Skip <i>len</i> number of bytes in input stream<i>in</i>
+ @param in input stream
+ @param len number of bytes to skip
+ @throws IOException when skipped less number of bytes]]>
+      </doc>
+    </method>
+    <method name="toByteArray" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="writables" type="org.apache.hadoop.io.Writable[]"/>
+      <doc>
+      <![CDATA[Convert writables to a byte array]]>
+      </doc>
+    </method>
+    <method name="readStringSafely" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <param name="maxLength" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <doc>
+      <![CDATA[Read a string, but check it for sanity. The format consists of a vint
+ followed by the given number of bytes.
+ @param in the stream to read from
+ @param maxLength the largest acceptable length of the encoded string
+ @return the bytes as a string
+ @throws IOException if reading from the DataInput fails
+ @throws IllegalArgumentException if the encoded byte size for string 
+             is negative or larger than maxSize. Only the vint is read.]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.io.WritableUtils -->
+  <doc>
+  <![CDATA[Generic i/o code for use when reading and writing data to the network,
+to databases, and to files.]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.io.compress">
+  <!-- start class org.apache.hadoop.io.compress.BlockCompressorStream -->
+  <class name="BlockCompressorStream" extends="org.apache.hadoop.io.compress.CompressorStream"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BlockCompressorStream" type="java.io.OutputStream, org.apache.hadoop.io.compress.Compressor, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a {@link BlockCompressorStream}.
+ 
+ @param out stream
+ @param compressor compressor to be used
+ @param bufferSize size of buffer
+ @param compressionOverhead maximum 'overhead' of the compression 
+                            algorithm with given bufferSize]]>
+      </doc>
+    </constructor>
+    <constructor name="BlockCompressorStream" type="java.io.OutputStream, org.apache.hadoop.io.compress.Compressor"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a {@link BlockCompressorStream} with given output-stream and 
+ compressor.
+ Use default of 512 as bufferSize and compressionOverhead of 
+ (1% of bufferSize + 12 bytes) =  18 bytes (zlib algorithm).
+ 
+ @param out stream
+ @param compressor compressor to be used]]>
+      </doc>
+    </constructor>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write the data provided to the compression codec, compressing no more
+ than the buffer size less the compression overhead as specified during
+ construction for each block.
+
+ Each block contains the uncompressed length for the block, followed by
+ one or more length-prefixed blocks of compressed data.]]>
+      </doc>
+    </method>
+    <method name="finish"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="compress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A {@link org.apache.hadoop.io.compress.CompressorStream} which works
+ with 'block-based' based compression algorithms, as opposed to 
+ 'stream-based' compression algorithms.
+
+ It should be noted that this wrapper does not guarantee that blocks will
+ be sized for the compressor. If the
+ {@link org.apache.hadoop.io.compress.Compressor} requires buffering to
+ effect meaningful compression, it is responsible for it.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.BlockCompressorStream -->
+  <!-- start class org.apache.hadoop.io.compress.BlockDecompressorStream -->
+  <class name="BlockDecompressorStream" extends="org.apache.hadoop.io.compress.DecompressorStream"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BlockDecompressorStream" type="java.io.InputStream, org.apache.hadoop.io.compress.Decompressor, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a {@link BlockDecompressorStream}.
+ 
+ @param in input stream
+ @param decompressor decompressor to use
+ @param bufferSize size of buffer
+ @throws IOException]]>
+      </doc>
+    </constructor>
+    <constructor name="BlockDecompressorStream" type="java.io.InputStream, org.apache.hadoop.io.compress.Decompressor"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a {@link BlockDecompressorStream}.
+ 
+ @param in input stream
+ @param decompressor decompressor to use
+ @throws IOException]]>
+      </doc>
+    </constructor>
+    <constructor name="BlockDecompressorStream" type="java.io.InputStream"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="decompress" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getCompressedData" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="resetState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A {@link org.apache.hadoop.io.compress.DecompressorStream} which works
+ with 'block-based' based compression algorithms, as opposed to 
+ 'stream-based' compression algorithms.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.BlockDecompressorStream -->
+  <!-- start class org.apache.hadoop.io.compress.BZip2Codec -->
+  <class name="BZip2Codec" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <implements name="org.apache.hadoop.io.compress.SplittableCompressionCodec"/>
+    <constructor name="BZip2Codec"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates a new instance of BZip2Codec.]]>
+      </doc>
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Set the configuration to be used by this object.
+
+ @param conf the configuration object.]]>
+      </doc>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the configuration used by this object.
+
+ @return the configuration object used by this objec.]]>
+      </doc>
+    </method>
+    <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream}.
+
+ @param out        the location for the final output stream
+ @return a stream the user can write uncompressed data to, to have it 
+         compressed
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a {@link CompressionOutputStream} that will write to the given
+ {@link OutputStream} with the given {@link Compressor}.
+
+ @param out        the location for the final output stream
+ @param compressor compressor to use
+ @return a stream the user can write uncompressed data to, to have it 
+         compressed
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getCompressorType" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
+
+ @return the type of compressor needed by this codec.]]>
+      </doc>
+    </method>
+    <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new {@link Compressor} for use by this {@link CompressionCodec}.
+
+ @return a new compressor for use by this codec]]>
+      </doc>
+    </method>
+    <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a {@link CompressionInputStream} that will read from the given
+ input stream and return a stream for uncompressed data.
+
+ @param in the stream to read compressed bytes from
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a {@link CompressionInputStream} that will read from the given
+ {@link InputStream} with the given {@link Decompressor}, and return a 
+ stream for uncompressed data.
+
+ @param in           the stream to read compressed bytes from
+ @param decompressor decompressor to use
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createInputStream" return="org.apache.hadoop.io.compress.SplitCompressionInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="seekableIn" type="java.io.InputStream"/>
+      <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+      <param name="start" type="long"/>
+      <param name="end" type="long"/>
+      <param name="readMode" type="org.apache.hadoop.io.compress.SplittableCompressionCodec.READ_MODE"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Creates CompressionInputStream to be used to read off uncompressed data
+ in one of the two reading modes. i.e. Continuous or Blocked reading modes
+
+ @param seekableIn The InputStream
+ @param start The start offset into the compressed stream
+ @param end The end offset into the compressed stream
+ @param readMode Controls whether progress is reported continuously or
+                 only at block boundaries.
+
+ @return CompressionInputStream for BZip2 aligned at block boundaries]]>
+      </doc>
+    </method>
+    <method name="getDecompressorType" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
+
+ @return the type of decompressor needed by this codec.]]>
+      </doc>
+    </method>
+    <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
+
+ @return a new decompressor for use by this codec]]>
+      </doc>
+    </method>
+    <method name="getDefaultExtension" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[.bz2 is recognized as the default extension for compressed BZip2 files
+
+ @return A String telling the default bzip2 file extension]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class provides output and input streams for bzip2 compression
+ and decompression.  It uses the native bzip2 library on the system
+ if possible, else it uses a pure-Java implementation of the bzip2
+ algorithm.  The configuration parameter
+ io.compression.codec.bzip2.library can be used to control this
+ behavior.
+
+ In the pure-Java mode, the Compressor and Decompressor interfaces
+ are not implemented.  Therefore, in that mode, those methods of
+ CompressionCodec which have a Compressor or Decompressor type
+ argument, throw UnsupportedOperationException.
+
+ Currently, support for splittability is available only in the
+ pure-Java mode; therefore, if a SplitCompressionInputStream is
+ requested, the pure-Java implementation is used, regardless of the
+ setting of the configuration parameter mentioned above.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.BZip2Codec -->
+  <!-- start class org.apache.hadoop.io.compress.CodecConstants -->
+  <class name="CodecConstants" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <field name="DEFAULT_CODEC_EXTENSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default extension for {@link org.apache.hadoop.io.compress.DefaultCodec}.]]>
+      </doc>
+    </field>
+    <field name="BZIP2_CODEC_EXTENSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default extension for {@link org.apache.hadoop.io.compress.BZip2Codec}.]]>
+      </doc>
+    </field>
+    <field name="GZIP_CODEC_EXTENSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default extension for {@link org.apache.hadoop.io.compress.GzipCodec}.]]>
+      </doc>
+    </field>
+    <field name="LZ4_CODEC_EXTENSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default extension for {@link org.apache.hadoop.io.compress.Lz4Codec}.]]>
+      </doc>
+    </field>
+    <field name="PASSTHROUGH_CODEC_EXTENSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default extension for
+ {@link org.apache.hadoop.io.compress.PassthroughCodec}.]]>
+      </doc>
+    </field>
+    <field name="SNAPPY_CODEC_EXTENSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default extension for {@link org.apache.hadoop.io.compress.SnappyCodec}.]]>
+      </doc>
+    </field>
+    <field name="ZSTANDARD_CODEC_EXTENSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default extension for {@link org.apache.hadoop.io.compress.ZStandardCodec}.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Codec related constants.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.CodecConstants -->
+  <!-- start class org.apache.hadoop.io.compress.CodecPool -->
+  <class name="CodecPool" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CodecPool"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getCompressor" return="org.apache.hadoop.io.compress.Compressor"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get a {@link Compressor} for the given {@link CompressionCodec} from the 
+ pool or a new one.
+
+ @param codec the <code>CompressionCodec</code> for which to get the 
+              <code>Compressor</code>
+ @param conf the <code>Configuration</code> object which contains confs for creating or reinit the compressor
+ @return <code>Compressor</code> for the given 
+         <code>CompressionCodec</code> from the pool or a new one]]>
+      </doc>
+    </method>
+    <method name="getCompressor" return="org.apache.hadoop.io.compress.Compressor"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+    </method>
+    <method name="getDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+      <doc>
+      <![CDATA[Get a {@link Decompressor} for the given {@link CompressionCodec} from the
+ pool or a new one.
+  
+ @param codec the <code>CompressionCodec</code> for which to get the 
+              <code>Decompressor</code>
+ @return <code>Decompressor</code> for the given 
+         <code>CompressionCodec</code> the pool or a new one]]>
+      </doc>
+    </method>
+    <method name="returnCompressor"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+      <doc>
+      <![CDATA[Return the {@link Compressor} to the pool.
+ 
+ @param compressor the <code>Compressor</code> to be returned to the pool]]>
+      </doc>
+    </method>
+    <method name="returnDecompressor"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+      <doc>
+      <![CDATA[Return the {@link Decompressor} to the pool.
+ 
+ @param decompressor the <code>Decompressor</code> to be returned to the 
+                     pool]]>
+      </doc>
+    </method>
+    <method name="getLeasedCompressorsCount" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+      <doc>
+      <![CDATA[Return the number of leased {@link Compressor}s for this
+ {@link CompressionCodec}]]>
+      </doc>
+    </method>
+    <method name="getLeasedDecompressorsCount" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/>
+      <doc>
+      <![CDATA[Return the number of leased {@link Decompressor}s for this
+ {@link CompressionCodec}]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A global compressor/decompressor pool used to save and reuse 
+ (possibly native) compression/decompression codecs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.CodecPool -->
+  <!-- start interface org.apache.hadoop.io.compress.CompressionCodec -->
+  <interface name="CompressionCodec"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a {@link CompressionOutputStream} that will write to the given 
+ {@link OutputStream}.
+ 
+ @param out the location for the final output stream
+ @return a stream the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a {@link CompressionOutputStream} that will write to the given 
+ {@link OutputStream} with the given {@link Compressor}.
+ 
+ @param out the location for the final output stream
+ @param compressor compressor to use
+ @return a stream the user can write uncompressed data to have it compressed
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getCompressorType" return="java.lang.Class"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
+ 
+ @return the type of compressor needed by this codec.]]>
+      </doc>
+    </method>
+    <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new {@link Compressor} for use by this {@link CompressionCodec}.
+ 
+ @return a new compressor for use by this codec]]>
+      </doc>
+    </method>
+    <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a {@link CompressionInputStream} that will read from the given
+ input stream.
+ 
+ @param in the stream to read compressed bytes from
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a {@link CompressionInputStream} that will read from the given 
+ {@link InputStream} with the given {@link Decompressor}.
+ 
+ @param in the stream to read compressed bytes from
+ @param decompressor decompressor to use
+ @return a stream to read uncompressed bytes from
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getDecompressorType" return="java.lang.Class"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
+ 
+ @return the type of decompressor needed by this codec.]]>
+      </doc>
+    </method>
+    <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
+ 
+ @return a new decompressor for use by this codec]]>
+      </doc>
+    </method>
+    <method name="getDefaultExtension" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the default filename extension for this kind of compression.
+ @return the extension including the '.']]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class encapsulates a streaming compression/decompression pair.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.compress.CompressionCodec -->
+  <!-- start class org.apache.hadoop.io.compress.CompressionCodecFactory -->
+  <class name="CompressionCodecFactory" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CompressionCodecFactory" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Find the codecs specified in the config value io.compression.codecs 
+ and register them. Defaults to gzip and deflate.]]>
+      </doc>
+    </constructor>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Print the extension map out as a string.]]>
+      </doc>
+    </method>
+    <method name="getCodecClasses" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the list of codecs discovered via a Java ServiceLoader, or
+ listed in the configuration. Codecs specified in configuration come
+ later in the returned list, and are considered to override those
+ from the ServiceLoader.
+ @param conf the configuration to look in
+ @return a list of the {@link CompressionCodec} classes]]>
+      </doc>
+    </method>
+    <method name="setCodecClasses"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="classes" type="java.util.List"/>
+      <doc>
+      <![CDATA[Sets a list of codec classes in the configuration. In addition to any
+ classes specified using this method, {@link CompressionCodec} classes on
+ the classpath are discovered using a Java ServiceLoader.
+ @param conf the configuration to modify
+ @param classes the list of classes to set]]>
+      </doc>
+    </method>
+    <method name="getCodec" return="org.apache.hadoop.io.compress.CompressionCodec"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Find the relevant compression codec for the given file based on its
+ filename suffix.
+ @param file the filename to check
+ @return the codec object]]>
+      </doc>
+    </method>
+    <method name="getCodecByClassName" return="org.apache.hadoop.io.compress.CompressionCodec"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="classname" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Find the relevant compression codec for the codec's canonical class name.
+ @param classname the canonical class name of the codec
+ @return the codec object]]>
+      </doc>
+    </method>
+    <method name="getCodecByName" return="org.apache.hadoop.io.compress.CompressionCodec"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="codecName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Find the relevant compression codec for the codec's canonical class name
+ or by codec alias.
+ <p>
+ Codec aliases are case insensitive.
+ <p>
+ The code alias is the short class name (without the package name).
+ If the short class name ends with 'Codec', then there are two aliases for
+ the codec, the complete short class name and the short class name without
+ the 'Codec' ending. For example for the 'GzipCodec' codec class name the
+ alias are 'gzip' and 'gzipcodec'.
+
+ @param codecName the canonical class name of the codec
+ @return the codec object]]>
+      </doc>
+    </method>
+    <method name="getCodecClassByName" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="codecName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Find the relevant compression codec for the codec's canonical class name
+ or by codec alias and returns its implemetation class.
+ <p>
+ Codec aliases are case insensitive.
+ <p>
+ The code alias is the short class name (without the package name).
+ If the short class name ends with 'Codec', then there are two aliases for
+ the codec, the complete short class name and the short class name without
+ the 'Codec' ending. For example for the 'GzipCodec' codec class name the
+ alias are 'gzip' and 'gzipcodec'.
+
+ @param codecName the canonical class name of the codec
+ @return the codec class]]>
+      </doc>
+    </method>
+    <method name="removeSuffix" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filename" type="java.lang.String"/>
+      <param name="suffix" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Removes a suffix from a filename, if it has it.
+ @param filename the filename to strip
+ @param suffix the suffix to remove
+ @return the shortened filename]]>
+      </doc>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[A little test program.
+ @param args]]>
+      </doc>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A factory that will find the correct codec for a given filename.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.CompressionCodecFactory -->
+  <!-- start class org.apache.hadoop.io.compress.CompressionInputStream -->
+  <class name="CompressionInputStream" extends="java.io.InputStream"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.fs.Seekable"/>
+    <implements name="org.apache.hadoop.fs.statistics.IOStatisticsSource"/>
+    <constructor name="CompressionInputStream" type="java.io.InputStream"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a compression input stream that reads
+ the decompressed bytes from the given stream.
+ 
+ @param in The input stream to be compressed.
+ @throws IOException]]>
+      </doc>
+    </constructor>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getIOStatistics" return="org.apache.hadoop.fs.statistics.IOStatistics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return any IOStatistics provided by the underlying stream.
+ @return IO stats from the inner stream.]]>
+      </doc>
+    </method>
+    <method name="read" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read bytes from the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+      </doc>
+    </method>
+    <method name="resetState"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reset the decompressor to its initial state and discard any buffered data,
+ as the underlying stream may have been repositioned.]]>
+      </doc>
+    </method>
+    <method name="getPos" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method returns the current position in the stream.
+
+ @return Current position in stream as a long]]>
+      </doc>
+    </method>
+    <method name="seek"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pos" type="long"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <doc>
+      <![CDATA[This method is current not supported.
+
+ @throws UnsupportedOperationException]]>
+      </doc>
+    </method>
+    <method name="seekToNewSource" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="targetPos" type="long"/>
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <doc>
+      <![CDATA[This method is current not supported.
+
+ @throws UnsupportedOperationException]]>
+      </doc>
+    </method>
+    <field name="in" type="java.io.InputStream"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The input stream to be compressed.]]>
+      </doc>
+    </field>
+    <field name="maxAvailableData" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A compression input stream.
+
+ <p>Implementations are assumed to be buffered.  This permits clients to
+ reposition the underlying input stream then call {@link #resetState()},
+ without having to also synchronize client buffers.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.CompressionInputStream -->
+  <!-- start class org.apache.hadoop.io.compress.CompressionOutputStream -->
+  <class name="CompressionOutputStream" extends="java.io.OutputStream"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.fs.statistics.IOStatisticsSource"/>
+    <constructor name="CompressionOutputStream" type="java.io.OutputStream"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a compression output stream that writes
+ the compressed bytes to the given stream.
+ @param out]]>
+      </doc>
+    </constructor>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write compressed bytes to the stream.
+ Made abstract to prevent leakage to underlying stream.]]>
+      </doc>
+    </method>
+    <method name="finish"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Finishes writing compressed data to the output stream 
+ without closing the underlying stream.]]>
+      </doc>
+    </method>
+    <method name="resetState"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reset the compression to the initial state. 
+ Does not reset the underlying stream.]]>
+      </doc>
+    </method>
+    <method name="getIOStatistics" return="org.apache.hadoop.fs.statistics.IOStatistics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return any IOStatistics provided by the underlying stream.
+ @return IO stats from the inner stream.]]>
+      </doc>
+    </method>
+    <field name="out" type="java.io.OutputStream"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The output stream to be compressed.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A compression output stream.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.CompressionOutputStream -->
+  <!-- start interface org.apache.hadoop.io.compress.Compressor -->
+  <interface name="Compressor"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="setInput"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <doc>
+      <![CDATA[Sets input data for compression. 
+ This should be called whenever #needsInput() returns 
+ <code>true</code> indicating that more input data is required.
+ 
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+      </doc>
+    </method>
+    <method name="needsInput" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns true if the input data buffer is empty and 
+ #setInput() should be called to provide more input. 
+ 
+ @return <code>true</code> if the input data buffer is empty and 
+ #setInput() should be called in order to provide more input.]]>
+      </doc>
+    </method>
+    <method name="setDictionary"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <doc>
+      <![CDATA[Sets preset dictionary for compression. A preset dictionary 
+ is used when the history buffer can be predetermined. 
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+      </doc>
+    </method>
+    <method name="getBytesRead" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return number of uncompressed bytes input so far.]]>
+      </doc>
+    </method>
+    <method name="getBytesWritten" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return number of compressed bytes output so far.]]>
+      </doc>
+    </method>
+    <method name="finish"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[When called, indicates that compression should end
+ with the current contents of the input buffer.]]>
+      </doc>
+    </method>
+    <method name="finished" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns true if the end of the compressed 
+ data output stream has been reached.
+ @return <code>true</code> if the end of the compressed
+ data output stream has been reached.]]>
+      </doc>
+    </method>
+    <method name="compress" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Fills specified buffer with compressed data. Returns actual number
+ of bytes of compressed data. A return value of 0 indicates that
+ needsInput() should be called in order to determine if more input
+ data is required.
+ 
+ @param b Buffer for the compressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of compressed data.]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Resets compressor so that a new set of input data can be processed.]]>
+      </doc>
+    </method>
+    <method name="end"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Closes the compressor and discards any unprocessed input.]]>
+      </doc>
+    </method>
+    <method name="reinit"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Prepare the compressor to be used in a new stream with settings defined in
+ the given Configuration
+ 
+ @param conf Configuration from which new setting are fetched]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Specification of a stream-based 'compressor' which can be  
+ plugged into a {@link CompressionOutputStream} to compress data.
+ This is modelled after {@link java.util.zip.Deflater}]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.compress.Compressor -->
+  <!-- start class org.apache.hadoop.io.compress.CompressorStream -->
+  <class name="CompressorStream" extends="org.apache.hadoop.io.compress.CompressionOutputStream"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CompressorStream" type="java.io.OutputStream, org.apache.hadoop.io.compress.Compressor, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="CompressorStream" type="java.io.OutputStream, org.apache.hadoop.io.compress.Compressor"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="CompressorStream" type="java.io.OutputStream"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Allow derived classes to directly set the underlying stream.
+ 
+ @param out Underlying output stream.]]>
+      </doc>
+    </constructor>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="compress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="finish"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="resetState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="compressor" type="org.apache.hadoop.io.compress.Compressor"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="buffer" type="byte[]"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="closed" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.CompressorStream -->
+  <!-- start interface org.apache.hadoop.io.compress.Decompressor -->
+  <interface name="Decompressor"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="setInput"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <doc>
+      <![CDATA[Sets input data for decompression. 
+ This should be called if and only if {@link #needsInput()} returns 
+ <code>true</code> indicating that more input data is required.
+ (Both native and non-native versions of various Decompressors require
+ that the data passed in via <code>b[]</code> remain unmodified until
+ the caller is explicitly notified--via {@link #needsInput()}--that the
+ buffer may be safely modified.  With this requirement, an extra
+ buffer-copy can be avoided.)
+ 
+ @param b Input data
+ @param off Start offset
+ @param len Length]]>
+      </doc>
+    </method>
+    <method name="needsInput" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns <code>true</code> if the input data buffer is empty and 
+ {@link #setInput(byte[], int, int)} should be called to
+ provide more input. 
+ 
+ @return <code>true</code> if the input data buffer is empty and 
+ {@link #setInput(byte[], int, int)} should be called in
+ order to provide more input.]]>
+      </doc>
+    </method>
+    <method name="setDictionary"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <doc>
+      <![CDATA[Sets preset dictionary for compression. A preset dictionary
+ is used when the history buffer can be predetermined. 
+
+ @param b Dictionary data bytes
+ @param off Start offset
+ @param len Length]]>
+      </doc>
+    </method>
+    <method name="needsDictionary" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns <code>true</code> if a preset dictionary is needed for decompression.
+ @return <code>true</code> if a preset dictionary is needed for decompression]]>
+      </doc>
+    </method>
+    <method name="finished" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns <code>true</code> if the end of the decompressed 
+ data output stream has been reached. Indicates a concatenated data stream
+ when finished() returns <code>true</code> and {@link #getRemaining()}
+ returns a positive value. finished() will be reset with the
+ {@link #reset()} method.
+ @return <code>true</code> if the end of the decompressed
+ data output stream has been reached.]]>
+      </doc>
+    </method>
+    <method name="decompress" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Fills specified buffer with uncompressed data. Returns actual number
+ of bytes of uncompressed data. A return value of 0 indicates that
+ {@link #needsInput()} should be called in order to determine if more
+ input data is required.
+ 
+ @param b Buffer for the uncompressed data
+ @param off Start offset of the data
+ @param len Size of the buffer
+ @return The actual number of bytes of uncompressed data.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRemaining" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the number of bytes remaining in the compressed data buffer.
+ Indicates a concatenated data stream if {@link #finished()} returns
+ <code>true</code> and getRemaining() returns a positive value. If
+ {@link #finished()} returns <code>true</code> and getRemaining() returns
+ a zero value, indicates that the end of data stream has been reached and
+ is not a concatenated data stream. 
+ @return The number of bytes remaining in the compressed data buffer.]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Resets decompressor and input and output buffers so that a new set of
+ input data can be processed. If {@link #finished()} returns
+ <code>true</code> and {@link #getRemaining()} returns a positive value,
+ reset() is called before processing of the next data stream in the
+ concatenated data stream. {@link #finished()} will be reset and will
+ return <code>false</code> when reset() is called.]]>
+      </doc>
+    </method>
+    <method name="end"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Closes the decompressor and discards any unprocessed input.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Specification of a stream-based 'de-compressor' which can be  
+ plugged into a {@link CompressionInputStream} to uncompress data.
+ This is modelled after {@link java.util.zip.Inflater}]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.compress.Decompressor -->
+  <!-- start class org.apache.hadoop.io.compress.DecompressorStream -->
+  <class name="DecompressorStream" extends="org.apache.hadoop.io.compress.CompressionInputStream"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="DecompressorStream" type="java.io.InputStream, org.apache.hadoop.io.compress.Decompressor, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <constructor name="DecompressorStream" type="java.io.InputStream, org.apache.hadoop.io.compress.Decompressor"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <constructor name="DecompressorStream" type="java.io.InputStream"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Allow derived classes to directly set the underlying stream.
+ 
+ @param in Underlying input stream.
+ @throws IOException]]>
+      </doc>
+    </constructor>
+    <method name="read" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="read" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="decompress" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getCompressedData" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="checkStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="resetState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="skip" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="n" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="available" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="markSupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="mark"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="readlimit" type="int"/>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="buffer" type="byte[]"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="eof" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="closed" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.DecompressorStream -->
+  <!-- start class org.apache.hadoop.io.compress.DefaultCodec -->
+  <class name="DefaultCodec" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+    <implements name="org.apache.hadoop.io.compress.DirectDecompressionCodec"/>
+    <constructor name="DefaultCodec"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getCompressorType" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getDecompressorType" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createDirectDecompressor" return="org.apache.hadoop.io.compress.DirectDecompressor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getDefaultExtension" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.DefaultCodec -->
+  <!-- start interface org.apache.hadoop.io.compress.DirectDecompressionCodec -->
+  <interface name="DirectDecompressionCodec"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+    <method name="createDirectDecompressor" return="org.apache.hadoop.io.compress.DirectDecompressor"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new {@link DirectDecompressor} for use by this {@link DirectDecompressionCodec}.
+ 
+ @return a new direct decompressor for use by this codec]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class encapsulates a codec which can decompress direct bytebuffers.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.compress.DirectDecompressionCodec -->
+  <!-- start interface org.apache.hadoop.io.compress.DirectDecompressor -->
+  <interface name="DirectDecompressor"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="decompress"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="src" type="java.nio.ByteBuffer"/>
+      <param name="dst" type="java.nio.ByteBuffer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Specification of a direct ByteBuffer 'de-compressor'.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.compress.DirectDecompressor -->
+  <!-- start class org.apache.hadoop.io.compress.GzipCodec -->
+  <class name="GzipCodec" extends="org.apache.hadoop.io.compress.DefaultCodec"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GzipCodec"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCompressorType" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDecompressorType" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createDirectDecompressor" return="org.apache.hadoop.io.compress.DirectDecompressor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDefaultExtension" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[This class creates gzip compressors/decompressors.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.GzipCodec -->
+  <!-- start class org.apache.hadoop.io.compress.PassthroughCodec -->
+  <class name="PassthroughCodec" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+    <constructor name="PassthroughCodec"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getDefaultExtension" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+      <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getCompressorType" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getDecompressorType" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="CLASSNAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Classname of the codec: {@value}.]]>
+      </doc>
+    </field>
+    <field name="OPT_EXTENSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Option to control the extension of the code: {@value}.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_EXTENSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This default extension is here so that if no extension has been defined,
+ some value is still returned: {@value}.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[This is a special codec which does not transform the output.
+ It can be declared as a codec in the option "io.compression.codecs",
+ and then it will declare that it supports the file extension
+ set in {@link #OPT_EXTENSION}.
+
+ This allows decompression to be disabled on a job, even when there is
+ a registered/discoverable decompression codec for a file extension
+ -without having to change the standard codec binding mechanism.
+
+ For example, to disable decompression for gzipped files, set the
+ options
+ <pre>
+   io.compression.codecs = org.apache.hadoop.io.compress.PassthroughCodec
+   io.compress.passthrough.extension = .gz
+ </pre>
+
+ <i>Note:</i> this is not a Splittable codec: it doesn't know the
+ capabilities of the passed in stream. It should be possible to
+ extend this in a subclass: the inner classes are marked as protected
+ to enable this. <i>Do not retrofit splitting to this class.</i>.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.PassthroughCodec -->
+  <!-- start class org.apache.hadoop.io.compress.SplitCompressionInputStream -->
+  <class name="SplitCompressionInputStream" extends="org.apache.hadoop.io.compress.CompressionInputStream"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SplitCompressionInputStream" type="java.io.InputStream, long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="setStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="start" type="long"/>
+    </method>
+    <method name="setEnd"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="end" type="long"/>
+    </method>
+    <method name="getAdjustedStart" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[After calling createInputStream, the values of start or end
+ might change.  So this method can be used to get the new value of start.
+ @return The changed value of start]]>
+      </doc>
+    </method>
+    <method name="getAdjustedEnd" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[After calling createInputStream, the values of start or end
+ might change.  So this method can be used to get the new value of end.
+ @return The changed value of end]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[An InputStream covering a range of compressed data. The start and end
+ offsets requested by a client may be modified by the codec to fit block
+ boundaries or other algorithm-dependent requirements.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.compress.SplitCompressionInputStream -->
+  <!-- start interface org.apache.hadoop.io.compress.SplittableCompressionCodec -->
+  <interface name="SplittableCompressionCodec"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.compress.CompressionCodec"/>
+    <method name="createInputStream" return="org.apache.hadoop.io.compress.SplitCompressionInputStream"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="seekableIn" type="java.io.InputStream"/>
+      <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/>
+      <param name="start" type="long"/>
+      <param name="end" type="long"/>
+      <param name="readMode" type="org.apache.hadoop.io.compress.SplittableCompressionCodec.READ_MODE"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a stream as dictated by the readMode.  This method is used when
+ the codecs wants the ability to work with the underlying stream positions.
+
+ @param seekableIn  The seekable input stream (seeks in compressed data)
+ @param start The start offset into the compressed stream. May be changed
+              by the underlying codec.
+ @param end The end offset into the compressed stream. May be changed by
+            the underlying codec.
+ @param readMode Controls whether stream position is reported continuously
+                 from the compressed stream or only at block boundaries.
+ @return  a stream to read uncompressed bytes from]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This interface is meant to be implemented by those compression codecs
+ which are capable to compress / de-compress a stream starting at any
+ arbitrary position.
+
+ Especially the process of de-compressing a stream starting at some arbitrary
+ position is challenging.  Most of the codecs are only able to successfully
+ de-compress a stream, if they start from the very beginning till the end.
+ One of the reasons is the stored state at the beginning of the stream which
+ is crucial for de-compression.
+
+ Yet there are few codecs which do not save the whole state at the beginning
+ of the stream and hence can be used to de-compress stream starting at any
+ arbitrary points.  This interface is meant to be used by such codecs.  Such
+ codecs are highly valuable, especially in the context of Hadoop, because
+ an input compressed file can be split and hence can be worked on by multiple
+ machines in parallel.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.compress.SplittableCompressionCodec -->
+</package>
+<package name="org.apache.hadoop.io.erasurecode">
+  <!-- start class org.apache.hadoop.io.erasurecode.ECSchema -->
+  <class name="ECSchema" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Serializable"/>
+    <constructor name="ECSchema" type="java.util.Map"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor with schema name and provided all options. Note the options may
+ contain additional information for the erasure codec to interpret further.
+ @param allOptions all schema options]]>
+      </doc>
+    </constructor>
+    <constructor name="ECSchema" type="java.lang.String, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor with key parameters provided.
+ @param codecName codec name
+ @param numDataUnits number of data units used in the schema
+ @param numParityUnits number of parity units used in the schema]]>
+      </doc>
+    </constructor>
+    <constructor name="ECSchema" type="java.lang.String, int, int, java.util.Map"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor with key parameters provided. Note the extraOptions may contain
+ additional information for the erasure codec to interpret further.
+ @param codecName codec name
+ @param numDataUnits number of data units used in the schema
+ @param numParityUnits number of parity units used in the schema
+ @param extraOptions extra options to configure the codec]]>
+      </doc>
+    </constructor>
+    <method name="getCodecName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the codec name
+ @return codec name]]>
+      </doc>
+    </method>
+    <method name="getExtraOptions" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get extra options specific to a erasure code.
+ @return extra options]]>
+      </doc>
+    </method>
+    <method name="getNumDataUnits" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get required data units count in a coding group
+ @return count of data units]]>
+      </doc>
+    </method>
+    <method name="getNumParityUnits" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get required parity units count in a coding group
+ @return count of parity units]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Make a meaningful string representation for log output.
+ @return string representation]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="NUM_DATA_UNITS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NUM_PARITY_UNITS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CODEC_NAME_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Erasure coding schema to housekeep relevant information.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.erasurecode.ECSchema -->
+</package>
+<package name="org.apache.hadoop.io.erasurecode.coder.util">
+</package>
+<package name="org.apache.hadoop.io.erasurecode.grouper">
+</package>
+<package name="org.apache.hadoop.io.file.tfile">
+  <!-- start class org.apache.hadoop.io.file.tfile.MetaBlockAlreadyExists -->
+  <class name="MetaBlockAlreadyExists" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <doc>
+    <![CDATA[Exception - Meta Block with the same name already exists.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.file.tfile.MetaBlockAlreadyExists -->
+  <!-- start class org.apache.hadoop.io.file.tfile.MetaBlockDoesNotExist -->
+  <class name="MetaBlockDoesNotExist" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <doc>
+    <![CDATA[Exception - No such Meta Block with the given name.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.file.tfile.MetaBlockDoesNotExist -->
+  <!-- start interface org.apache.hadoop.io.file.tfile.RawComparable -->
+  <interface name="RawComparable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="buffer" return="byte[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the underlying byte array.
+ 
+ @return The underlying byte array.]]>
+      </doc>
+    </method>
+    <method name="offset" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the offset of the first byte in the byte array.
+ 
+ @return The offset of the first byte in the byte array.]]>
+      </doc>
+    </method>
+    <method name="size" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the size of the byte range in the byte array.
+ 
+ @return The size of the byte range in the byte array.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface for objects that can be compared through {@link RawComparator}.
+ This is useful in places where we need a single object reference to specify a
+ range of bytes in a byte array, such as {@link Comparable} or
+ {@link Collections#binarySearch(java.util.List, Object, Comparator)}
+ 
+ The actual comparison among RawComparable's requires an external
+ RawComparator and it is applications' responsibility to ensure two
+ RawComparable are supposed to be semantically comparable with the same
+ RawComparator.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.file.tfile.RawComparable -->
+  <!-- start class org.apache.hadoop.io.file.tfile.TFile -->
+  <class name="TFile" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="makeComparator" return="java.util.Comparator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Make a raw comparator from a string name.
+ 
+ @param name
+          Comparator name
+ @return A RawComparable comparator.]]>
+      </doc>
+    </method>
+    <method name="getSupportedCompressionAlgorithms" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get names of supported compression algorithms. The names are acceptable by
+ TFile.Writer.
+ 
+ @return Array of strings, each represents a supported compression
+         algorithm. Currently, the following compression algorithms are
+         supported.
+         <ul>
+         <li>"none" - No compression.
+         <li>"lzo" - LZO compression.
+         <li>"gz" - GZIP compression.
+         </ul>]]>
+      </doc>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Dumping the TFile information.
+ 
+ @param args
+          A list of TFile paths.]]>
+      </doc>
+    </method>
+    <field name="COMPRESSION_GZ" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[compression: gzip]]>
+      </doc>
+    </field>
+    <field name="COMPRESSION_LZO" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[compression: lzo]]>
+      </doc>
+    </field>
+    <field name="COMPRESSION_NONE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[compression: none]]>
+      </doc>
+    </field>
+    <field name="COMPARATOR_MEMCMP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[comparator: memcmp]]>
+      </doc>
+    </field>
+    <field name="COMPARATOR_JCLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[comparator prefix: java class]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A TFile is a container of key-value pairs. Both keys and values are type-less
+ bytes. Keys are restricted to 64KB, value length is not restricted
+ (practically limited to the available disk storage). TFile further provides
+ the following features:
+ <ul>
+ <li>Block Compression.
+ <li>Named meta data blocks.
+ <li>Sorted or unsorted keys.
+ <li>Seek by key or by file offset.
+ </ul>
+ The memory footprint of a TFile includes the following:
+ <ul>
+ <li>Some constant overhead of reading or writing a compressed block.
+ <ul>
+ <li>Each compressed block requires one compression/decompression codec for
+ I/O.
+ <li>Temporary space to buffer the key.
+ <li>Temporary space to buffer the value (for TFile.Writer only). Values are
+ chunk encoded, so that we buffer at most one chunk of user data. By default,
+ the chunk buffer is 1MB. Reading chunked value does not require additional
+ memory.
+ </ul>
+ <li>TFile index, which is proportional to the total number of Data Blocks.
+ The total amount of memory needed to hold the index can be estimated as
+ (56+AvgKeySize)*NumBlocks.
+ <li>MetaBlock index, which is proportional to the total number of Meta
+ Blocks. The total amount of memory needed to hold the index for Meta Blocks
+ can be estimated as (40+AvgMetaBlockName)*NumMetaBlock.
+ </ul>
+ <p>
+ The behavior of TFile can be customized by the following variables through
+ Configuration:
+ <ul>
+ <li><b>tfile.io.chunk.size</b>: Value chunk size. Integer (in bytes). Default
+ to 1MB. Values of the length less than the chunk size is guaranteed to have
+ known value length in read time (See
+ {@link TFile.Reader.Scanner.Entry#isValueLengthKnown()}).
+ <li><b>tfile.fs.output.buffer.size</b>: Buffer size used for
+ FSDataOutputStream. Integer (in bytes). Default to 256KB.
+ <li><b>tfile.fs.input.buffer.size</b>: Buffer size used for
+ FSDataInputStream. Integer (in bytes). Default to 256KB.
+ </ul>
+ <p>
+ Suggestions on performance optimization.
+ <ul>
+ <li>Minimum block size. We recommend a setting of minimum block size between
+ 256KB to 1MB for general usage. Larger block size is preferred if files are
+ primarily for sequential access. However, it would lead to inefficient random
+ access (because there are more data to decompress). Smaller blocks are good
+ for random access, but require more memory to hold the block index, and may
+ be slower to create (because we must flush the compressor stream at the
+ conclusion of each data block, which leads to an FS I/O flush). Further, due
+ to the internal caching in Compression codec, the smallest possible block
+ size would be around 20KB-30KB.
+ <li>The current implementation does not offer true multi-threading for
+ reading. The implementation uses FSDataInputStream seek()+read(), which is
+ shown to be much faster than positioned-read call in single thread mode.
+ However, it also means that if multiple threads attempt to access the same
+ TFile (using multiple scanners) simultaneously, the actual I/O is carried out
+ sequentially even if they access different DFS blocks.
+ <li>Compression codec. Use "none" if the data is not very compressible (by
+ compressible, I mean a compression ratio at least 2:1). Generally, use "lzo"
+ as the starting point for experimenting. "gz" offers slightly better
+ compression ratio over "lzo" but requires 4x CPU to compress and 2x CPU to
+ decompress, comparing to "lzo".
+ <li>File system buffering, if the underlying FSDataInputStream and
+ FSDataOutputStream is already adequately buffered; or if applications
+ reads/writes keys and values in large buffers, we can reduce the sizes of
+ input/output buffering in TFile layer by setting the configuration parameters
+ "tfile.fs.input.buffer.size" and "tfile.fs.output.buffer.size".
+ </ul>
+ 
+ Some design rationale behind TFile can be found at <a
+ href="https://issues.apache.org/jira/browse/HADOOP-3315">Hadoop-3315</a>.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.file.tfile.TFile -->
+  <!-- start class org.apache.hadoop.io.file.tfile.Utils -->
+  <class name="Utils" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="writeVInt"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="n" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Encoding an integer into a variable-length encoding format. Synonymous to
+ <code>Utils#writeVLong(out, n)</code>.
+ 
+ @param out
+          output stream
+ @param n
+          The integer to be encoded
+ @throws IOException
+ @see Utils#writeVLong(DataOutput, long)]]>
+      </doc>
+    </method>
+    <method name="writeVLong"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="n" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Encoding a Long integer into a variable-length encoding format.
+ <ul>
+ <li>if n in [-32, 127): encode in one byte with the actual value.
+ Otherwise,
+ <li>if n in [-20*2^8, 20*2^8): encode in two bytes: byte[0] = n/256 - 52;
+ byte[1]=n&amp;0xff. Otherwise,
+ <li>if n IN [-16*2^16, 16*2^16): encode in three bytes: byte[0]=n/2^16 -
+ 88; byte[1]=(n&gt;&gt;8)&amp;0xff; byte[2]=n&amp;0xff. Otherwise,
+ <li>if n in [-8*2^24, 8*2^24): encode in four bytes: byte[0]=n/2^24 - 112;
+ byte[1] = (n&gt;&gt;16)&amp;0xff; byte[2] = (n&gt;&gt;8)&amp;0xff;
+ byte[3]=n&amp;0xff.
+ Otherwise:
+ <li>if n in [-2^31, 2^31): encode in five bytes: byte[0]=-125; byte[1] =
+ (n&gt;&gt;24)&amp;0xff; byte[2]=(n&gt;&gt;16)&amp;0xff;
+ byte[3]=(n&gt;&gt;8)&amp;0xff; byte[4]=n&amp;0xff;
+ <li>if n in [-2^39, 2^39): encode in six bytes: byte[0]=-124; byte[1] =
+ (n&gt;&gt;32)&amp;0xff; byte[2]=(n&gt;&gt;24)&amp;0xff;
+ byte[3]=(n&gt;&gt;16)&amp;0xff; byte[4]=(n&gt;&gt;8)&amp;0xff;
+ byte[5]=n&amp;0xff
+ <li>if n in [-2^47, 2^47): encode in seven bytes: byte[0]=-123; byte[1] =
+ (n&gt;&gt;40)&amp;0xff; byte[2]=(n&gt;&gt;32)&amp;0xff;
+ byte[3]=(n&gt;&gt;24)&amp;0xff; byte[4]=(n&gt;&gt;16)&amp;0xff;
+ byte[5]=(n&gt;&gt;8)&amp;0xff; byte[6]=n&amp;0xff;
+ <li>if n in [-2^55, 2^55): encode in eight bytes: byte[0]=-122; byte[1] =
+ (n&gt;&gt;48)&amp;0xff; byte[2] = (n&gt;&gt;40)&amp;0xff;
+ byte[3]=(n&gt;&gt;32)&amp;0xff; byte[4]=(n&gt;&gt;24)&amp;0xff; byte[5]=
+ (n&gt;&gt;16)&amp;0xff; byte[6]=(n&gt;&gt;8)&amp;0xff; byte[7]=n&amp;0xff;
+ <li>if n in [-2^63, 2^63): encode in nine bytes: byte[0]=-121; byte[1] =
+ (n&gt;&gt;54)&amp;0xff; byte[2] = (n&gt;&gt;48)&amp;0xff;
+ byte[3] = (n&gt;&gt;40)&amp;0xff; byte[4]=(n&gt;&gt;32)&amp;0xff;
+ byte[5]=(n&gt;&gt;24)&amp;0xff; byte[6]=(n&gt;&gt;16)&amp;0xff; byte[7]=
+ (n&gt;&gt;8)&amp;0xff; byte[8]=n&amp;0xff;
+ </ul>
+ 
+ @param out
+          output stream
+ @param n
+          the integer number
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readVInt" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Decoding the variable-length integer. Synonymous to
+ <code>(int)Utils#readVLong(in)</code>.
+ 
+ @param in
+          input stream
+ @return the decoded integer
+ @throws IOException
+ 
+ @see Utils#readVLong(DataInput)]]>
+      </doc>
+    </method>
+    <method name="readVLong" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Decoding the variable-length integer. Suppose the value of the first byte
+ is FB, and the following bytes are NB[*].
+ <ul>
+ <li>if (FB &gt;= -32), return (long)FB;
+ <li>if (FB in [-72, -33]), return (FB+52)&lt;&lt;8 + NB[0]&amp;0xff;
+ <li>if (FB in [-104, -73]), return (FB+88)&lt;&lt;16 +
+ (NB[0]&amp;0xff)&lt;&lt;8 + NB[1]&amp;0xff;
+ <li>if (FB in [-120, -105]), return (FB+112)&lt;&lt;24 + (NB[0]&amp;0xff)
+ &lt;&lt;16 + (NB[1]&amp;0xff)&lt;&lt;8 + NB[2]&amp;0xff;
+ <li>if (FB in [-128, -121]), return interpret NB[FB+129] as a signed
+ big-endian integer.
+ </ul>
+ @param in
+          input stream
+ @return the decoded long integer.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="writeString"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <param name="s" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write a String as a VInt n, followed by n Bytes as in Text format.
+ 
+ @param out
+ @param s
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read a String as a VInt n, followed by n Bytes in Text format.
+ 
+ @param in
+          The input stream.
+ @return The string
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="lowerBound" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="list" type="java.util.List"/>
+      <param name="key" type="T"/>
+      <param name="cmp" type="java.util.Comparator"/>
+      <doc>
+      <![CDATA[Lower bound binary search. Find the index to the first element in the list
+ that compares greater than or equal to key.
+ 
+ @param <T>
+          Type of the input key.
+ @param list
+          The list
+ @param key
+          The input key.
+ @param cmp
+          Comparator for the key.
+ @return The index to the desired element if it exists; or list.size()
+         otherwise.]]>
+      </doc>
+    </method>
+    <method name="upperBound" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="list" type="java.util.List"/>
+      <param name="key" type="T"/>
+      <param name="cmp" type="java.util.Comparator"/>
+      <doc>
+      <![CDATA[Upper bound binary search. Find the index to the first element in the list
+ that compares greater than the input key.
+ 
+ @param <T>
+          Type of the input key.
+ @param list
+          The list
+ @param key
+          The input key.
+ @param cmp
+          Comparator for the key.
+ @return The index to the desired element if it exists; or list.size()
+         otherwise.]]>
+      </doc>
+    </method>
+    <method name="lowerBound" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="list" type="java.util.List"/>
+      <param name="key" type="T"/>
+      <doc>
+      <![CDATA[Lower bound binary search. Find the index to the first element in the list
+ that compares greater than or equal to key.
+ 
+ @param <T>
+          Type of the input key.
+ @param list
+          The list
+ @param key
+          The input key.
+ @return The index to the desired element if it exists; or list.size()
+         otherwise.]]>
+      </doc>
+    </method>
+    <method name="upperBound" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="list" type="java.util.List"/>
+      <param name="key" type="T"/>
+      <doc>
+      <![CDATA[Upper bound binary search. Find the index to the first element in the list
+ that compares greater than the input key.
+ 
+ @param <T>
+          Type of the input key.
+ @param list
+          The list
+ @param key
+          The input key.
+ @return The index to the desired element if it exists; or list.size()
+         otherwise.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Supporting Utility classes used by TFile, and shared by users of TFile.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.file.tfile.Utils -->
+</package>
+<package name="org.apache.hadoop.io.serializer">
+  <!-- start class org.apache.hadoop.io.serializer.JavaSerialization -->
+  <class name="JavaSerialization" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.serializer.Serialization"/>
+    <constructor name="JavaSerialization"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[<p>
+ An experimental {@link Serialization} for Java {@link Serializable} classes.
+ </p>
+ @see JavaSerializationComparator]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.serializer.JavaSerialization -->
+  <!-- start class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+  <class name="JavaSerializationComparator" extends="org.apache.hadoop.io.serializer.DeserializerComparator"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="JavaSerializationComparator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <doc>
+    <![CDATA[<p>
+ A {@link RawComparator} that uses a {@link JavaSerialization}
+ {@link Deserializer} to deserialize objects that are then compared via
+ their {@link Comparable} interfaces.
+ </p>
+ @param <T>
+ @see JavaSerialization]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.serializer.JavaSerializationComparator -->
+  <!-- start class org.apache.hadoop.io.serializer.WritableSerialization -->
+  <class name="WritableSerialization" extends="org.apache.hadoop.conf.Configured"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.serializer.Serialization"/>
+    <constructor name="WritableSerialization"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[A {@link Serialization} for {@link Writable}s that delegates to
+ {@link Writable#write(java.io.DataOutput)} and
+ {@link Writable#readFields(java.io.DataInput)}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.serializer.WritableSerialization -->
+  <doc>
+  <![CDATA[<p>
+This package provides a mechanism for using different serialization frameworks
+in Hadoop. The property "io.serializations" defines a list of
+{@link org.apache.hadoop.io.serializer.Serialization}s that know how to create
+{@link org.apache.hadoop.io.serializer.Serializer}s and
+{@link org.apache.hadoop.io.serializer.Deserializer}s.
+</p>
+
+<p>
+To add a new serialization framework write an implementation of
+{@link org.apache.hadoop.io.serializer.Serialization} and add its name to the
+"io.serializations" property.
+</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.io.serializer.avro">
+  <!-- start interface org.apache.hadoop.io.serializer.avro.AvroReflectSerializable -->
+  <interface name="AvroReflectSerializable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <doc>
+    <![CDATA[Tag interface for Avro 'reflect' serializable classes. Classes implementing 
+ this interface can be serialized/deserialized using 
+ {@link AvroReflectSerialization}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.io.serializer.avro.AvroReflectSerializable -->
+  <!-- start class org.apache.hadoop.io.serializer.avro.AvroReflectSerialization -->
+  <class name="AvroReflectSerialization" extends="org.apache.hadoop.io.serializer.avro.AvroSerialization"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AvroReflectSerialization"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <field name="AVRO_REFLECT_PACKAGES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Key to configure packages that contain classes to be serialized and 
+ deserialized using this class. Multiple packages can be specified using 
+ comma-separated list.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Serialization for Avro Reflect classes. For a class to be accepted by this 
+ serialization, it must either be in the package list configured via 
+ <code>avro.reflect.pkgs</code> or implement 
+ {@link AvroReflectSerializable} interface.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.serializer.avro.AvroReflectSerialization -->
+  <!-- start class org.apache.hadoop.io.serializer.avro.AvroSerialization -->
+  <class name="AvroSerialization" extends="org.apache.hadoop.conf.Configured"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.serializer.Serialization"/>
+    <constructor name="AvroSerialization"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <field name="AVRO_SCHEMA_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Base class for providing serialization to Avro types.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.serializer.avro.AvroSerialization -->
+  <!-- start class org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization -->
+  <class name="AvroSpecificSerialization" extends="org.apache.hadoop.io.serializer.avro.AvroSerialization"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AvroSpecificSerialization"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Serialization for Avro Specific classes. This serialization is to be used 
+ for classes generated by Avro's 'specific' compiler.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization -->
+  <doc>
+  <![CDATA[<p>
+This package provides Avro serialization in Hadoop. This can be used to 
+serialize/deserialize Avro types in Hadoop.
+</p>
+
+<p>
+Use {@link org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization} for 
+serialization of classes generated by Avro's 'specific' compiler.
+</p>
+
+<p>
+Use {@link org.apache.hadoop.io.serializer.avro.AvroReflectSerialization} for 
+other classes. 
+{@link org.apache.hadoop.io.serializer.avro.AvroReflectSerialization} work for  
+any class which is either in the package list configured via 
+{@link org.apache.hadoop.io.serializer.avro.AvroReflectSerialization#AVRO_REFLECT_PACKAGES} 
+or implement {@link org.apache.hadoop.io.serializer.avro.AvroReflectSerializable}
+interface.
+</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.ipc.protocolPB">
+</package>
+<package name="org.apache.hadoop.log">
+</package>
+<package name="org.apache.hadoop.log.metrics">
+  <!-- start class org.apache.hadoop.log.metrics.EventCounter -->
+  <class name="EventCounter" extends="org.apache.log4j.AppenderSkeleton"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="EventCounter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="append"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="requiresLayout" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A log4J Appender that simply counts logging events in three levels:
+ fatal, error and warn. The class name is used in log4j.properties]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.log.metrics.EventCounter -->
+</package>
+<package name="org.apache.hadoop.metrics2">
+  <!-- start class org.apache.hadoop.metrics2.AbstractMetric -->
+  <class name="AbstractMetric" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.metrics2.MetricsInfo"/>
+    <constructor name="AbstractMetric" type="org.apache.hadoop.metrics2.MetricsInfo"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct the metric
+ @param info  about the metric]]>
+      </doc>
+    </constructor>
+    <method name="name" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="description" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="info" return="org.apache.hadoop.metrics2.MetricsInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="value" return="java.lang.Number"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the value of the metric
+ @return the value of the metric]]>
+      </doc>
+    </method>
+    <method name="type" return="org.apache.hadoop.metrics2.MetricType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the type of the metric
+ @return the type of the metric]]>
+      </doc>
+    </method>
+    <method name="visit"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="visitor" type="org.apache.hadoop.metrics2.MetricsVisitor"/>
+      <doc>
+      <![CDATA[Accept a visitor interface
+ @param visitor of the metric]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[The immutable metric]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.AbstractMetric -->
+  <!-- start interface org.apache.hadoop.metrics2.MetricsCollector -->
+  <interface name="MetricsCollector"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="addRecord" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a metrics record
+ @param name  of the record
+ @return  a {@link MetricsRecordBuilder} for the record {@code name}]]>
+      </doc>
+    </method>
+    <method name="addRecord" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <doc>
+      <![CDATA[Add a metrics record
+ @param info  of the record
+ @return  a {@link MetricsRecordBuilder} for metrics {@code info}]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The metrics collector interface]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.metrics2.MetricsCollector -->
+  <!-- start class org.apache.hadoop.metrics2.MetricsException -->
+  <class name="MetricsException" extends="java.lang.RuntimeException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MetricsException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct the exception with a message
+ @param message for the exception]]>
+      </doc>
+    </constructor>
+    <constructor name="MetricsException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct the exception with a message and a cause
+ @param message for the exception
+ @param cause of the exception]]>
+      </doc>
+    </constructor>
+    <constructor name="MetricsException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct the exception with a cause
+ @param cause of the exception]]>
+      </doc>
+    </constructor>
+    <doc>
+    <![CDATA[A general metrics exception wrapper]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.MetricsException -->
+  <!-- start class org.apache.hadoop.metrics2.MetricsFilter -->
+  <class name="MetricsFilter" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.metrics2.MetricsPlugin"/>
+    <constructor name="MetricsFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="accepts" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Whether to accept the name
+ @param name  to filter on
+ @return  true to accept; false otherwise.]]>
+      </doc>
+    </method>
+    <method name="accepts" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tag" type="org.apache.hadoop.metrics2.MetricsTag"/>
+      <doc>
+      <![CDATA[Whether to accept the tag
+ @param tag to filter on
+ @return  true to accept; false otherwise]]>
+      </doc>
+    </method>
+    <method name="accepts" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tags" type="java.lang.Iterable"/>
+      <doc>
+      <![CDATA[Whether to accept the tags
+ @param tags to filter on
+ @return  true to accept; false otherwise]]>
+      </doc>
+    </method>
+    <method name="accepts" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="record" type="org.apache.hadoop.metrics2.MetricsRecord"/>
+      <doc>
+      <![CDATA[Whether to accept the record
+ @param record  to filter on
+ @return  true to accept; false otherwise.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The metrics filter interface. The MetricsFilter objects can be used either to
+ filter the metrics from {@link MetricsSource}s or to filter metrics per
+ {@link MetricsSink}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.MetricsFilter -->
+  <!-- start interface org.apache.hadoop.metrics2.MetricsInfo -->
+  <interface name="MetricsInfo"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="name" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Typically name corresponds to annotation {@link Metric#value()} or
+ the name of the class.
+ @return the name of the metric/tag]]>
+      </doc>
+    </method>
+    <method name="description" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Typically the description corresponds to annotation {@link Metric#about()}
+ or the name of the class.
+ @return the description of the metric/tag]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface to provide immutable metainfo for metrics.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.metrics2.MetricsInfo -->
+  <!-- start class org.apache.hadoop.metrics2.MetricsJsonBuilder -->
+  <class name="MetricsJsonBuilder" extends="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MetricsJsonBuilder" type="org.apache.hadoop.metrics2.MetricsCollector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Build an instance.
+ @param parent parent collector. Unused in this instance; only used for
+ the {@link #parent()} method]]>
+      </doc>
+    </constructor>
+    <method name="tag" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="java.lang.String"/>
+    </method>
+    <method name="add" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tag" type="org.apache.hadoop.metrics2.MetricsTag"/>
+    </method>
+    <method name="add" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="metric" type="org.apache.hadoop.metrics2.AbstractMetric"/>
+    </method>
+    <method name="setContext" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="java.lang.String"/>
+    </method>
+    <method name="addCounter" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="int"/>
+    </method>
+    <method name="addCounter" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="long"/>
+    </method>
+    <method name="addGauge" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="int"/>
+    </method>
+    <method name="addGauge" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="long"/>
+    </method>
+    <method name="addGauge" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="float"/>
+    </method>
+    <method name="addGauge" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="double"/>
+    </method>
+    <method name="parent" return="org.apache.hadoop.metrics2.MetricsCollector"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Build a JSON dump of the metrics.
+
+ The {@link #toString()} operator dumps out all values collected.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.MetricsJsonBuilder -->
+  <!-- start interface org.apache.hadoop.metrics2.MetricsPlugin -->
+  <interface name="MetricsPlugin"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="init"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.commons.configuration2.SubsetConfiguration"/>
+      <doc>
+      <![CDATA[Initialize the plugin
+ @param conf  the configuration object for the plugin]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The plugin interface for the metrics framework]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.metrics2.MetricsPlugin -->
+  <!-- start interface org.apache.hadoop.metrics2.MetricsRecord -->
+  <interface name="MetricsRecord"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="timestamp" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the timestamp of the metrics
+ @return  the timestamp]]>
+      </doc>
+    </method>
+    <method name="name" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the metrics record name]]>
+      </doc>
+    </method>
+    <method name="description" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the description of the metrics record]]>
+      </doc>
+    </method>
+    <method name="context" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the context name of the metrics record]]>
+      </doc>
+    </method>
+    <method name="tags" return="java.util.Collection"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the tags of the record
+ Note: returning a collection instead of iterable as we
+ need to use tags as keys (hence Collection#hashCode etc.) in maps
+ @return an unmodifiable collection of tags]]>
+      </doc>
+    </method>
+    <method name="metrics" return="java.lang.Iterable"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the metrics of the record
+ @return an immutable iterable interface for metrics]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[An immutable snapshot of metrics with a timestamp]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.metrics2.MetricsRecord -->
+  <!-- start class org.apache.hadoop.metrics2.MetricsRecordBuilder -->
+  <class name="MetricsRecordBuilder" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MetricsRecordBuilder"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="tag" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a metrics value with metrics information
+ @param info  metadata of the tag
+ @param value of the tag
+ @return self]]>
+      </doc>
+    </method>
+    <method name="add" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tag" type="org.apache.hadoop.metrics2.MetricsTag"/>
+      <doc>
+      <![CDATA[Add an immutable metrics tag object
+ @param tag a pre-made tag object (potentially save an object construction)
+ @return self]]>
+      </doc>
+    </method>
+    <method name="add" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="metric" type="org.apache.hadoop.metrics2.AbstractMetric"/>
+      <doc>
+      <![CDATA[Add a pre-made immutable metric object
+ @param metric  the pre-made metric to save an object construction
+ @return self]]>
+      </doc>
+    </method>
+    <method name="setContext" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the context tag
+ @param value of the context
+ @return self]]>
+      </doc>
+    </method>
+    <method name="addCounter" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="int"/>
+      <doc>
+      <![CDATA[Add an integer metric
+ @param info  metadata of the metric
+ @param value of the metric
+ @return self]]>
+      </doc>
+    </method>
+    <method name="addCounter" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Add an long metric
+ @param info  metadata of the metric
+ @param value of the metric
+ @return self]]>
+      </doc>
+    </method>
+    <method name="addGauge" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="int"/>
+      <doc>
+      <![CDATA[Add a integer gauge metric
+ @param info  metadata of the metric
+ @param value of the metric
+ @return self]]>
+      </doc>
+    </method>
+    <method name="addGauge" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Add a long gauge metric
+ @param info  metadata of the metric
+ @param value of the metric
+ @return self]]>
+      </doc>
+    </method>
+    <method name="addGauge" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="float"/>
+      <doc>
+      <![CDATA[Add a float gauge metric
+ @param info  metadata of the metric
+ @param value of the metric
+ @return self]]>
+      </doc>
+    </method>
+    <method name="addGauge" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="double"/>
+      <doc>
+      <![CDATA[Add a double gauge metric
+ @param info  metadata of the metric
+ @param value of the metric
+ @return self]]>
+      </doc>
+    </method>
+    <method name="parent" return="org.apache.hadoop.metrics2.MetricsCollector"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the parent metrics collector object]]>
+      </doc>
+    </method>
+    <method name="endRecord" return="org.apache.hadoop.metrics2.MetricsCollector"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Syntactic sugar to add multiple records in a collector in a one liner.
+ @return the parent metrics collector object]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The metrics record builder interface]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.MetricsRecordBuilder -->
+  <!-- start interface org.apache.hadoop.metrics2.MetricsSink -->
+  <interface name="MetricsSink"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.metrics2.MetricsPlugin"/>
+    <method name="putMetrics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="record" type="org.apache.hadoop.metrics2.MetricsRecord"/>
+      <doc>
+      <![CDATA[Put a metrics record in the sink
+ @param record  the record to put]]>
+      </doc>
+    </method>
+    <method name="flush"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Flush any buffered metrics]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The metrics sink interface. <p>
+ Implementations of this interface consume the {@link MetricsRecord} generated
+ from {@link MetricsSource}. It registers with {@link MetricsSystem} which
+ periodically pushes the {@link MetricsRecord} to the sink using
+ {@link #putMetrics(MetricsRecord)} method.  If the implementing class also
+ implements {@link Closeable}, then the MetricsSystem will close the sink when
+ it is stopped.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.metrics2.MetricsSink -->
+  <!-- start interface org.apache.hadoop.metrics2.MetricsSource -->
+  <interface name="MetricsSource"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getMetrics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="collector" type="org.apache.hadoop.metrics2.MetricsCollector"/>
+      <param name="all" type="boolean"/>
+      <doc>
+      <![CDATA[Get metrics from the metrics source
+ @param collector to contain the resulting metrics snapshot
+ @param all if true, return all metrics even if unchanged.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The source of metrics information. It generates and updates metrics. It
+ registers with {@link MetricsSystem}, which periodically polls it to collect
+ {@link MetricsRecord} and passes it to {@link MetricsSink}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.metrics2.MetricsSource -->
+  <!-- start class org.apache.hadoop.metrics2.MetricsSystem -->
+  <class name="MetricsSystem" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.metrics2.MetricsSystemMXBean"/>
+    <constructor name="MetricsSystem"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="unregisterSource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Unregister a metrics source
+ @param name of the source. This is the name you use to call register()]]>
+      </doc>
+    </method>
+    <method name="register" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="source" type="T"/>
+      <doc>
+      <![CDATA[Register a metrics source (deriving name and description from the object)
+ @param <T>   the actual type of the source object
+ @param source  object to register
+ @return  the source object
+ @exception MetricsException]]>
+      </doc>
+    </method>
+    <method name="register" return="T"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="desc" type="java.lang.String"/>
+      <param name="source" type="T"/>
+      <doc>
+      <![CDATA[Register a metrics source
+ @param <T>   the actual type of the source object
+ @param source object to register
+ @param name  of the source. Must be unique or null (then extracted from
+              the annotations of the source object.)
+ @param desc  the description of the source (or null. See above.)
+ @return the source object
+ @exception MetricsException]]>
+      </doc>
+    </method>
+    <method name="register"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="callback" type="org.apache.hadoop.metrics2.MetricsSystem.Callback"/>
+      <doc>
+      <![CDATA[Register a callback interface for JMX events
+ @param callback  the callback object implementing the MBean interface.]]>
+      </doc>
+    </method>
+    <method name="publishMetricsNow"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Requests an immediate publish of all metrics from sources to sinks.
+ 
+ This is a "soft" request: the expectation is that a best effort will be
+ done to synchronously snapshot the metrics from all the sources and put
+ them in all the sinks (including flushing the sinks) before returning to
+ the caller. If this can't be accomplished in reasonable time it's OK to
+ return to the caller before everything is done.]]>
+      </doc>
+    </method>
+    <method name="shutdown" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shutdown the metrics system completely (usually during server shutdown.)
+ The MetricsSystemMXBean will be unregistered.
+ @return true if shutdown completed]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The metrics system interface.
+ 
+ The following components are used for metrics.
+ <ul>
+ <li>{@link MetricsSource} generate and update metrics information.</li>
+ <li>{@link MetricsSink} consume the metrics information</li>
+ </ul>
+ 
+ {@link MetricsSource} and {@link MetricsSink} register with the metrics
+ system. Implementations of {@link MetricsSystem} polls the
+ {@link MetricsSource}s periodically and pass the {@link MetricsRecord}s to
+ {@link MetricsSink}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.MetricsSystem -->
+  <!-- start interface org.apache.hadoop.metrics2.MetricsSystemMXBean -->
+  <interface name="MetricsSystemMXBean"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="start"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Start the metrics system
+ @throws MetricsException]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Stop the metrics system
+ @throws MetricsException]]>
+      </doc>
+    </method>
+    <method name="startMetricsMBeans"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Start metrics MBeans
+ @throws MetricsException]]>
+      </doc>
+    </method>
+    <method name="stopMetricsMBeans"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Stop metrics MBeans.
+ Note, it doesn't stop the metrics system control MBean,
+ i.e this interface.
+ @throws MetricsException]]>
+      </doc>
+    </method>
+    <method name="currentConfig" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the current config
+ Avoided getConfig, as it'll turn into a "Config" attribute,
+ which doesn't support multiple line values in jconsole.
+ @throws MetricsException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The JMX interface to the metrics system]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.metrics2.MetricsSystemMXBean -->
+  <!-- start class org.apache.hadoop.metrics2.MetricsTag -->
+  <class name="MetricsTag" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.metrics2.MetricsInfo"/>
+    <constructor name="MetricsTag" type="org.apache.hadoop.metrics2.MetricsInfo, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct the tag with name, description and value
+ @param info  of the tag
+ @param value of the tag]]>
+      </doc>
+    </constructor>
+    <method name="name" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="description" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="info" return="org.apache.hadoop.metrics2.MetricsInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the info object of the tag]]>
+      </doc>
+    </method>
+    <method name="value" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the value of the tag
+ @return  the value]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Immutable tag for metrics (for grouping on host/queue/username etc.)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.MetricsTag -->
+  <!-- start class org.apache.hadoop.metrics2.MetricStringBuilder -->
+  <class name="MetricStringBuilder" extends="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MetricStringBuilder" type="org.apache.hadoop.metrics2.MetricsCollector, java.lang.String, java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Build an instance.
+ @param parent parent collector. Unused in this instance; only used for
+ the {@link #parent()} method
+ @param prefix string before each entry
+ @param separator separator between name and value
+ @param suffix suffix after each entry]]>
+      </doc>
+    </constructor>
+    <method name="add" return="org.apache.hadoop.metrics2.MetricStringBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="java.lang.Object"/>
+    </method>
+    <method name="tuple" return="org.apache.hadoop.metrics2.MetricStringBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add any key,val pair to the string, between the prefix and suffix,
+ separated by the separator.
+ @param key key
+ @param value value
+ @return this instance]]>
+      </doc>
+    </method>
+    <method name="tag" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="java.lang.String"/>
+    </method>
+    <method name="add" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tag" type="org.apache.hadoop.metrics2.MetricsTag"/>
+    </method>
+    <method name="add" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="metric" type="org.apache.hadoop.metrics2.AbstractMetric"/>
+    </method>
+    <method name="setContext" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="java.lang.String"/>
+    </method>
+    <method name="addCounter" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="int"/>
+    </method>
+    <method name="addCounter" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="long"/>
+    </method>
+    <method name="addGauge" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="int"/>
+    </method>
+    <method name="addGauge" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="long"/>
+    </method>
+    <method name="addGauge" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="float"/>
+    </method>
+    <method name="addGauge" return="org.apache.hadoop.metrics2.MetricsRecordBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="double"/>
+    </method>
+    <method name="parent" return="org.apache.hadoop.metrics2.MetricsCollector"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Build a string dump of the metrics.
+
+ The {@link #toString()} operator dumps out all values collected.
+
+ Every entry is formatted as
+ {@code prefix + name + separator + value + suffix}]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.MetricStringBuilder -->
+  <!-- start interface org.apache.hadoop.metrics2.MetricsVisitor -->
+  <interface name="MetricsVisitor"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="gauge"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="int"/>
+      <doc>
+      <![CDATA[Callback for integer value gauges
+ @param info  the metric info
+ @param value of the metric]]>
+      </doc>
+    </method>
+    <method name="gauge"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Callback for long value gauges
+ @param info  the metric info
+ @param value of the metric]]>
+      </doc>
+    </method>
+    <method name="gauge"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="float"/>
+      <doc>
+      <![CDATA[Callback for float value gauges
+ @param info  the metric info
+ @param value of the metric]]>
+      </doc>
+    </method>
+    <method name="gauge"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="double"/>
+      <doc>
+      <![CDATA[Callback for double value gauges
+ @param info  the metric info
+ @param value of the metric]]>
+      </doc>
+    </method>
+    <method name="counter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="int"/>
+      <doc>
+      <![CDATA[Callback for integer value counters
+ @param info  the metric info
+ @param value of the metric]]>
+      </doc>
+    </method>
+    <method name="counter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Callback for long value counters
+ @param info  the metric info
+ @param value of the metric]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A visitor interface for metrics]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.metrics2.MetricsVisitor -->
+</package>
+<package name="org.apache.hadoop.metrics2.annotation">
+  <!-- start class org.apache.hadoop.metrics2.annotation.Metric -->
+  <class name="Metric"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.annotation.Annotation"/>
+    <doc>
+    <![CDATA[Annotation interface for a single metric used to annotate a field or a method
+ in the class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.annotation.Metric -->
+  <!-- start class org.apache.hadoop.metrics2.annotation.Metrics -->
+  <class name="Metrics"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.annotation.Annotation"/>
+    <doc>
+    <![CDATA[Annotation interface for a group of metrics]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.annotation.Metrics -->
+</package>
+<package name="org.apache.hadoop.metrics2.filter">
+  <!-- start class org.apache.hadoop.metrics2.filter.GlobFilter -->
+  <class name="GlobFilter" extends="org.apache.hadoop.metrics2.filter.AbstractPatternFilter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GlobFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="compile" return="com.google.re2j.Pattern"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="s" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[A glob pattern filter for metrics.
+
+ The class name is used in metrics config files]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.filter.GlobFilter -->
+  <!-- start class org.apache.hadoop.metrics2.filter.RegexFilter -->
+  <class name="RegexFilter" extends="org.apache.hadoop.metrics2.filter.AbstractPatternFilter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RegexFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="compile" return="com.google.re2j.Pattern"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="s" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[A regex pattern filter for metrics]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.filter.RegexFilter -->
+</package>
+<package name="org.apache.hadoop.metrics2.lib">
+  <!-- start class org.apache.hadoop.metrics2.lib.DefaultMetricsSystem -->
+  <class name="DefaultMetricsSystem" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.metrics2.lib.DefaultMetricsSystem[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.metrics2.lib.DefaultMetricsSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="initialize" return="org.apache.hadoop.metrics2.MetricsSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="prefix" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Convenience method to initialize the metrics system
+ @param prefix  for the metrics system configuration
+ @return the metrics system instance]]>
+      </doc>
+    </method>
+    <method name="instance" return="org.apache.hadoop.metrics2.MetricsSystem"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the metrics system object]]>
+      </doc>
+    </method>
+    <method name="shutdown"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shutdown the metrics system]]>
+      </doc>
+    </method>
+    <method name="setMiniClusterMode"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="choice" type="boolean"/>
+    </method>
+    <method name="inMiniClusterMode" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[The default metrics system singleton. This class is used by all the daemon
+ processes(such as NameNode, DataNode, JobTracker etc.). During daemon process
+ initialization the processes call {@link DefaultMetricsSystem#init(String)}
+ to initialize the {@link MetricsSystem}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.DefaultMetricsSystem -->
+  <!-- start class org.apache.hadoop.metrics2.lib.Interns -->
+  <class name="Interns" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Interns"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="info" return="org.apache.hadoop.metrics2.MetricsInfo"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="description" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get a metric info object.
+ @param name Name of metric info object
+ @param description Description of metric info object
+ @return an interned metric info object]]>
+      </doc>
+    </method>
+    <method name="tag" return="org.apache.hadoop.metrics2.MetricsTag"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get a metrics tag.
+ @param info  of the tag
+ @param value of the tag
+ @return an interned metrics tag]]>
+      </doc>
+    </method>
+    <method name="tag" return="org.apache.hadoop.metrics2.MetricsTag"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="description" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get a metrics tag.
+ @param name  of the tag
+ @param description of the tag
+ @param value of the tag
+ @return an interned metrics tag]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Helpers to create interned metrics info.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.Interns -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MetricsRegistry -->
+  <class name="MetricsRegistry" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MetricsRegistry" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct the registry with a record name
+ @param name  of the record of the metrics]]>
+      </doc>
+    </constructor>
+    <constructor name="MetricsRegistry" type="org.apache.hadoop.metrics2.MetricsInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct the registry with a metadata object
+ @param info  the info object for the metrics record/group]]>
+      </doc>
+    </constructor>
+    <method name="info" return="org.apache.hadoop.metrics2.MetricsInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the info object of the metrics registry]]>
+      </doc>
+    </method>
+    <method name="get" return="org.apache.hadoop.metrics2.lib.MutableMetric"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get a metric by name
+ @param name  of the metric
+ @return the metric object]]>
+      </doc>
+    </method>
+    <method name="getTag" return="org.apache.hadoop.metrics2.MetricsTag"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get a tag by name
+ @param name  of the tag
+ @return the tag object]]>
+      </doc>
+    </method>
+    <method name="newCounter" return="org.apache.hadoop.metrics2.lib.MutableCounterInt"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="desc" type="java.lang.String"/>
+      <param name="iVal" type="int"/>
+      <doc>
+      <![CDATA[Create a mutable integer counter
+ @param name  of the metric
+ @param desc  metric description
+ @param iVal  initial value
+ @return a new counter object]]>
+      </doc>
+    </method>
+    <method name="newCounter" return="org.apache.hadoop.metrics2.lib.MutableCounterInt"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="iVal" type="int"/>
+      <doc>
+      <![CDATA[Create a mutable integer counter
+ @param info  metadata of the metric
+ @param iVal  initial value
+ @return a new counter object]]>
+      </doc>
+    </method>
+    <method name="newCounter" return="org.apache.hadoop.metrics2.lib.MutableCounterLong"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="desc" type="java.lang.String"/>
+      <param name="iVal" type="long"/>
+      <doc>
+      <![CDATA[Create a mutable long integer counter
+ @param name  of the metric
+ @param desc  metric description
+ @param iVal  initial value
+ @return a new counter object]]>
+      </doc>
+    </method>
+    <method name="newCounter" return="org.apache.hadoop.metrics2.lib.MutableCounterLong"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="iVal" type="long"/>
+      <doc>
+      <![CDATA[Create a mutable long integer counter
+ @param info  metadata of the metric
+ @param iVal  initial value
+ @return a new counter object]]>
+      </doc>
+    </method>
+    <method name="newGauge" return="org.apache.hadoop.metrics2.lib.MutableGaugeInt"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="desc" type="java.lang.String"/>
+      <param name="iVal" type="int"/>
+      <doc>
+      <![CDATA[Create a mutable integer gauge
+ @param name  of the metric
+ @param desc  metric description
+ @param iVal  initial value
+ @return a new gauge object]]>
+      </doc>
+    </method>
+    <method name="newGauge" return="org.apache.hadoop.metrics2.lib.MutableGaugeInt"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="iVal" type="int"/>
+      <doc>
+      <![CDATA[Create a mutable integer gauge
+ @param info  metadata of the metric
+ @param iVal  initial value
+ @return a new gauge object]]>
+      </doc>
+    </method>
+    <method name="newGauge" return="org.apache.hadoop.metrics2.lib.MutableGaugeLong"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="desc" type="java.lang.String"/>
+      <param name="iVal" type="long"/>
+      <doc>
+      <![CDATA[Create a mutable long integer gauge
+ @param name  of the metric
+ @param desc  metric description
+ @param iVal  initial value
+ @return a new gauge object]]>
+      </doc>
+    </method>
+    <method name="newGauge" return="org.apache.hadoop.metrics2.lib.MutableGaugeLong"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="iVal" type="long"/>
+      <doc>
+      <![CDATA[Create a mutable long integer gauge
+ @param info  metadata of the metric
+ @param iVal  initial value
+ @return a new gauge object]]>
+      </doc>
+    </method>
+    <method name="newGauge" return="org.apache.hadoop.metrics2.lib.MutableGaugeFloat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="desc" type="java.lang.String"/>
+      <param name="iVal" type="float"/>
+      <doc>
+      <![CDATA[Create a mutable float gauge
+ @param name  of the metric
+ @param desc  metric description
+ @param iVal  initial value
+ @return a new gauge object]]>
+      </doc>
+    </method>
+    <method name="newGauge" return="org.apache.hadoop.metrics2.lib.MutableGaugeFloat"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="iVal" type="float"/>
+      <doc>
+      <![CDATA[Create a mutable float gauge
+ @param info  metadata of the metric
+ @param iVal  initial value
+ @return a new gauge object]]>
+      </doc>
+    </method>
+    <method name="newQuantiles" return="org.apache.hadoop.metrics2.lib.MutableQuantiles"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="desc" type="java.lang.String"/>
+      <param name="sampleName" type="java.lang.String"/>
+      <param name="valueName" type="java.lang.String"/>
+      <param name="interval" type="int"/>
+      <doc>
+      <![CDATA[Create a mutable metric that estimates quantiles of a stream of values
+ @param name of the metric
+ @param desc metric description
+ @param sampleName of the metric (e.g., "Ops")
+ @param valueName of the metric (e.g., "Time" or "Latency")
+ @param interval rollover interval of estimator in seconds
+ @return a new quantile estimator object
+ @throws MetricsException if interval is not a positive integer]]>
+      </doc>
+    </method>
+    <method name="newStat" return="org.apache.hadoop.metrics2.lib.MutableStat"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="desc" type="java.lang.String"/>
+      <param name="sampleName" type="java.lang.String"/>
+      <param name="valueName" type="java.lang.String"/>
+      <param name="extended" type="boolean"/>
+      <doc>
+      <![CDATA[Create a mutable metric with stats
+ @param name  of the metric
+ @param desc  metric description
+ @param sampleName  of the metric (e.g., "Ops")
+ @param valueName   of the metric (e.g., "Time" or "Latency")
+ @param extended    produce extended stat (stdev, min/max etc.) if true.
+ @return a new mutable stat metric object]]>
+      </doc>
+    </method>
+    <method name="newStat" return="org.apache.hadoop.metrics2.lib.MutableStat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="desc" type="java.lang.String"/>
+      <param name="sampleName" type="java.lang.String"/>
+      <param name="valueName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create a mutable metric with stats
+ @param name  of the metric
+ @param desc  metric description
+ @param sampleName  of the metric (e.g., "Ops")
+ @param valueName   of the metric (e.g., "Time" or "Latency")
+ @return a new mutable metric object]]>
+      </doc>
+    </method>
+    <method name="newRate" return="org.apache.hadoop.metrics2.lib.MutableRate"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create a mutable rate metric
+ @param name  of the metric
+ @return a new mutable metric object]]>
+      </doc>
+    </method>
+    <method name="newRate" return="org.apache.hadoop.metrics2.lib.MutableRate"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="description" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create a mutable rate metric
+ @param name  of the metric
+ @param description of the metric
+ @return a new mutable rate metric object]]>
+      </doc>
+    </method>
+    <method name="newRate" return="org.apache.hadoop.metrics2.lib.MutableRate"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="desc" type="java.lang.String"/>
+      <param name="extended" type="boolean"/>
+      <doc>
+      <![CDATA[Create a mutable rate metric (for throughput measurement)
+ @param name  of the metric
+ @param desc  description
+ @param extended  produce extended stat (stdev/min/max etc.) if true
+ @return a new mutable rate metric object]]>
+      </doc>
+    </method>
+    <method name="newRatesWithAggregation" return="org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="newMutableRollingAverages" return="org.apache.hadoop.metrics2.lib.MutableRollingAverages"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="valueName" type="java.lang.String"/>
+    </method>
+    <method name="add"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Add sample to a stat metric by name.
+ @param name  of the metric
+ @param value of the snapshot to add]]>
+      </doc>
+    </method>
+    <method name="setContext" return="org.apache.hadoop.metrics2.lib.MetricsRegistry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the metrics context tag
+ @param name of the context
+ @return the registry itself as a convenience]]>
+      </doc>
+    </method>
+    <method name="tag" return="org.apache.hadoop.metrics2.lib.MetricsRegistry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="description" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a tag to the metrics
+ @param name  of the tag
+ @param description of the tag
+ @param value of the tag
+ @return the registry (for keep adding tags)]]>
+      </doc>
+    </method>
+    <method name="tag" return="org.apache.hadoop.metrics2.lib.MetricsRegistry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="description" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+      <param name="override" type="boolean"/>
+      <doc>
+      <![CDATA[Add a tag to the metrics
+ @param name  of the tag
+ @param description of the tag
+ @param value of the tag
+ @param override  existing tag if true
+ @return the registry (for keep adding tags)]]>
+      </doc>
+    </method>
+    <method name="tag" return="org.apache.hadoop.metrics2.lib.MetricsRegistry"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="java.lang.String"/>
+      <param name="override" type="boolean"/>
+      <doc>
+      <![CDATA[Add a tag to the metrics
+ @param info  metadata of the tag
+ @param value of the tag
+ @param override existing tag if true
+ @return the registry (for keep adding tags etc.)]]>
+      </doc>
+    </method>
+    <method name="tag" return="org.apache.hadoop.metrics2.lib.MetricsRegistry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.metrics2.MetricsInfo"/>
+      <param name="value" type="java.lang.String"/>
+    </method>
+    <method name="snapshot"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="builder" type="org.apache.hadoop.metrics2.MetricsRecordBuilder"/>
+      <param name="all" type="boolean"/>
+      <doc>
+      <![CDATA[Sample all the mutable metrics and put the snapshot in the builder
+ @param builder to contain the metrics snapshot
+ @param all get all the metrics even if the values are not changed.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[An optional metrics registry class for creating and maintaining a
+ collection of MetricsMutables, making writing metrics source easier.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MetricsRegistry -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableCounter -->
+  <class name="MutableCounter" extends="org.apache.hadoop.metrics2.lib.MutableMetric"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MutableCounter" type="org.apache.hadoop.metrics2.MetricsInfo"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="info" return="org.apache.hadoop.metrics2.MetricsInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="incr"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Increment the metric value by 1.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The mutable counter (monotonically increasing) metric interface]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableCounter -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableCounterInt -->
+  <class name="MutableCounterInt" extends="org.apache.hadoop.metrics2.lib.MutableCounter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="incr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="incr"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delta" type="int"/>
+      <doc>
+      <![CDATA[Increment the value by a delta
+ @param delta of the increment]]>
+      </doc>
+    </method>
+    <method name="value" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="snapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="builder" type="org.apache.hadoop.metrics2.MetricsRecordBuilder"/>
+      <param name="all" type="boolean"/>
+    </method>
+    <doc>
+    <![CDATA[A mutable int counter for implementing metrics sources]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableCounterInt -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableCounterLong -->
+  <class name="MutableCounterLong" extends="org.apache.hadoop.metrics2.lib.MutableCounter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MutableCounterLong" type="org.apache.hadoop.metrics2.MetricsInfo, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="incr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="incr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delta" type="long"/>
+      <doc>
+      <![CDATA[Increment the value by a delta
+ @param delta of the increment]]>
+      </doc>
+    </method>
+    <method name="value" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="snapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="builder" type="org.apache.hadoop.metrics2.MetricsRecordBuilder"/>
+      <param name="all" type="boolean"/>
+    </method>
+    <doc>
+    <![CDATA[A mutable long counter]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableCounterLong -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableGauge -->
+  <class name="MutableGauge" extends="org.apache.hadoop.metrics2.lib.MutableMetric"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MutableGauge" type="org.apache.hadoop.metrics2.MetricsInfo"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="info" return="org.apache.hadoop.metrics2.MetricsInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="incr"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Increment the value of the metric by 1]]>
+      </doc>
+    </method>
+    <method name="decr"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Decrement the value of the metric by 1]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The mutable gauge metric interface]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableGauge -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableGaugeInt -->
+  <class name="MutableGaugeInt" extends="org.apache.hadoop.metrics2.lib.MutableGauge"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="value" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="incr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="incr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delta" type="int"/>
+      <doc>
+      <![CDATA[Increment by delta
+ @param delta of the increment]]>
+      </doc>
+    </method>
+    <method name="decr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="decr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delta" type="int"/>
+      <doc>
+      <![CDATA[decrement by delta
+ @param delta of the decrement]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="int"/>
+      <doc>
+      <![CDATA[Set the value of the metric
+ @param value to set]]>
+      </doc>
+    </method>
+    <method name="snapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="builder" type="org.apache.hadoop.metrics2.MetricsRecordBuilder"/>
+      <param name="all" type="boolean"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return  the value of the metric]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A mutable int gauge]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableGaugeInt -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableGaugeLong -->
+  <class name="MutableGaugeLong" extends="org.apache.hadoop.metrics2.lib.MutableGauge"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="value" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="incr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="incr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delta" type="long"/>
+      <doc>
+      <![CDATA[Increment by delta
+ @param delta of the increment]]>
+      </doc>
+    </method>
+    <method name="decr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="decr"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delta" type="long"/>
+      <doc>
+      <![CDATA[decrement by delta
+ @param delta of the decrement]]>
+      </doc>
+    </method>
+    <method name="set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Set the value of the metric
+ @param value to set]]>
+      </doc>
+    </method>
+    <method name="snapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="builder" type="org.apache.hadoop.metrics2.MetricsRecordBuilder"/>
+      <param name="all" type="boolean"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return  the value of the metric]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A mutable long gauge]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableGaugeLong -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableMetric -->
+  <class name="MutableMetric" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MutableMetric"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="snapshot"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="builder" type="org.apache.hadoop.metrics2.MetricsRecordBuilder"/>
+      <param name="all" type="boolean"/>
+      <doc>
+      <![CDATA[Get a snapshot of the metric
+ @param builder the metrics record builder
+ @param all if true, snapshot unchanged metrics as well]]>
+      </doc>
+    </method>
+    <method name="snapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="builder" type="org.apache.hadoop.metrics2.MetricsRecordBuilder"/>
+      <doc>
+      <![CDATA[Get a snapshot of metric if changed
+ @param builder the metrics record builder]]>
+      </doc>
+    </method>
+    <method name="setChanged"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Set the changed flag in mutable operations]]>
+      </doc>
+    </method>
+    <method name="clearChanged"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Clear the changed flag in the snapshot operations]]>
+      </doc>
+    </method>
+    <method name="changed" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return  true if metric is changed since last snapshot/snapshot]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The mutable metric interface]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableMetric -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableQuantiles -->
+  <class name="MutableQuantiles" extends="org.apache.hadoop.metrics2.lib.MutableMetric"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MutableQuantiles" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Instantiates a new {@link MutableQuantiles} for a metric that rolls itself
+ over on the specified time interval.
+ 
+ @param name
+          of the metric
+ @param description
+          long-form textual description of the metric
+ @param sampleName
+          type of items in the stream (e.g., "Ops")
+ @param valueName
+          type of the values
+ @param interval
+          rollover interval (in seconds) of the estimator]]>
+      </doc>
+    </constructor>
+    <method name="snapshot"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="builder" type="org.apache.hadoop.metrics2.MetricsRecordBuilder"/>
+      <param name="all" type="boolean"/>
+    </method>
+    <method name="add"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="long"/>
+    </method>
+    <method name="getInterval" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="stop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getEstimator" return="org.apache.hadoop.metrics2.util.QuantileEstimator"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the quantile estimator.
+
+ @return the quantile estimator]]>
+      </doc>
+    </method>
+    <method name="setEstimator"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="quantileEstimator" type="org.apache.hadoop.metrics2.util.QuantileEstimator"/>
+    </method>
+    <field name="quantiles" type="org.apache.hadoop.metrics2.util.Quantile[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="previousSnapshot" type="java.util.Map"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Watches a stream of long values, maintaining online estimates of specific
+ quantiles with provably low error bounds. This is particularly useful for
+ accurate high-percentile (e.g. 95th, 99th) latency metrics.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableQuantiles -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableRate -->
+  <class name="MutableRate" extends="org.apache.hadoop.metrics2.lib.MutableStat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <doc>
+    <![CDATA[A convenient mutable metric for throughput measurement]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableRate -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableRates -->
+  <class name="MutableRates" extends="org.apache.hadoop.metrics2.lib.MutableMetric"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Initialize the registry with all the methods in a protocol
+ so they all show up in the first snapshot.
+ Convenient for JMX implementations.
+ @param protocol the protocol class]]>
+      </doc>
+    </method>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="elapsed" type="long"/>
+      <doc>
+      <![CDATA[Add a rate sample for a rate metric
+ @param name of the rate metric
+ @param elapsed time]]>
+      </doc>
+    </method>
+    <method name="snapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="rb" type="org.apache.hadoop.metrics2.MetricsRecordBuilder"/>
+      <param name="all" type="boolean"/>
+    </method>
+    <doc>
+    <![CDATA[Helper class to manage a group of mutable rate metrics
+
+ This class synchronizes all accesses to the metrics it
+ contains, so it should not be used in situations where
+ there is high contention on the metrics.
+ {@link MutableRatesWithAggregation} is preferable in that
+ situation.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableRates -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation -->
+  <class name="MutableRatesWithAggregation" extends="org.apache.hadoop.metrics2.lib.MutableMetric"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MutableRatesWithAggregation"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Initialize the registry with all the methods in a protocol
+ so they all show up in the first snapshot.
+ Convenient for JMX implementations.
+ @param protocol the protocol class]]>
+      </doc>
+    </method>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="names" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Initialize the registry with all rate names passed in.
+ This is an alternative to the above init function since this metric
+ can be used more than just for rpc name.
+ @param names the array of all rate names]]>
+      </doc>
+    </method>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="elapsed" type="long"/>
+      <doc>
+      <![CDATA[Add a rate sample for a rate metric.
+ @param name of the rate metric
+ @param elapsed time]]>
+      </doc>
+    </method>
+    <method name="snapshot"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="rb" type="org.apache.hadoop.metrics2.MetricsRecordBuilder"/>
+      <param name="all" type="boolean"/>
+    </method>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="prefix" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Helper class to manage a group of mutable rate metrics.
+
+ Each thread will maintain a local rate count, and upon snapshot,
+ these values will be aggregated into a global rate. This class
+ should only be used for long running threads, as any metrics
+ produced between the last snapshot and the death of a thread
+ will be lost. This allows for significantly higher concurrency
+ than {@link MutableRates}. See HADOOP-24420.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableRollingAverages -->
+  <class name="MutableRollingAverages" extends="org.apache.hadoop.metrics2.lib.MutableMetric"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Closeable"/>
+    <constructor name="MutableRollingAverages" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor for {@link MutableRollingAverages}.
+ @param metricValueName]]>
+      </doc>
+    </constructor>
+    <method name="snapshot"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="builder" type="org.apache.hadoop.metrics2.MetricsRecordBuilder"/>
+      <param name="all" type="boolean"/>
+    </method>
+    <method name="collectThreadLocalStates"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Collects states maintained in {@link ThreadLocal}, if any.]]>
+      </doc>
+    </method>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[@param name
+          name of metric
+ @param value
+          value of metric]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getStats" return="java.util.Map"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="minSamples" type="long"/>
+      <doc>
+      <![CDATA[Retrieve a map of metric name {@literal ->} (aggregate).
+ Filter out entries that don't have at least minSamples.
+
+ @return a map of peer DataNode Id to the average latency to that
+         node seen over the measurement period.]]>
+      </doc>
+    </method>
+    <method name="setRecordValidityMs"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Use for test only.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ This class maintains a group of rolling average metrics. It implements the
+ algorithm of rolling average, i.e. a number of sliding windows are kept to
+ roll over and evict old subsets of samples. Each window has a subset of
+ samples in a stream, where sub-sum and sub-total are collected. All sub-sums
+ and sub-totals in all windows will be aggregated to final-sum and final-total
+ used to compute final average, which is called rolling average.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableRollingAverages -->
+  <!-- start class org.apache.hadoop.metrics2.lib.MutableStat -->
+  <class name="MutableStat" extends="org.apache.hadoop.metrics2.lib.MutableMetric"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MutableStat" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String, boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a sample statistics metric
+ @param name        of the metric
+ @param description of the metric
+ @param sampleName  of the metric (e.g. "Ops")
+ @param valueName   of the metric (e.g. "Time", "Latency")
+ @param extended    create extended stats (stdev, min/max etc.) by default.]]>
+      </doc>
+    </constructor>
+    <constructor name="MutableStat" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a snapshot stat metric with extended stat off by default
+ @param name        of the metric
+ @param description of the metric
+ @param sampleName  of the metric (e.g. "Ops")
+ @param valueName   of the metric (e.g. "Time", "Latency")]]>
+      </doc>
+    </constructor>
+    <method name="setExtended"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="extended" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether to display the extended stats (stdev, min/max etc.) or not
+ @param extended enable/disable displaying extended stats]]>
+      </doc>
+    </method>
+    <method name="setUpdateTimeStamp"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="updateTimeStamp" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether to update the snapshot time or not.
+ @param updateTimeStamp enable update stats snapshot timestamp]]>
+      </doc>
+    </method>
+    <method name="add"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numSamples" type="long"/>
+      <param name="sum" type="long"/>
+      <doc>
+      <![CDATA[Add a number of samples and their sum to the running stat
+
+ Note that although use of this method will preserve accurate mean values,
+ large values for numSamples may result in inaccurate variance values due
+ to the use of a single step of the Welford method for variance calculation.
+ @param numSamples  number of samples
+ @param sum of the samples]]>
+      </doc>
+    </method>
+    <method name="add"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Add a snapshot to the metric.
+ @param value of the metric]]>
+      </doc>
+    </method>
+    <method name="snapshot"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="builder" type="org.apache.hadoop.metrics2.MetricsRecordBuilder"/>
+      <param name="all" type="boolean"/>
+    </method>
+    <method name="lastStat" return="org.apache.hadoop.metrics2.util.SampleStat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return a SampleStat object that supports
+ calls like StdDev and Mean.
+ @return SampleStat]]>
+      </doc>
+    </method>
+    <method name="resetMinMax"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reset the all time min max of the metric]]>
+      </doc>
+    </method>
+    <method name="getSnapshotTimeStamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the SampleStat snapshot timestamp]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A mutable metric with stats.
+
+ Useful for keeping throughput/latency stats.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.lib.MutableStat -->
+</package>
+<package name="org.apache.hadoop.metrics2.sink">
+  <!-- start class org.apache.hadoop.metrics2.sink.FileSink -->
+  <class name="FileSink" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.metrics2.MetricsSink"/>
+    <implements name="java.io.Closeable"/>
+    <constructor name="FileSink"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.commons.configuration2.SubsetConfiguration"/>
+    </method>
+    <method name="putMetrics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="record" type="org.apache.hadoop.metrics2.MetricsRecord"/>
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A metrics sink that writes to a file]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.sink.FileSink -->
+  <!-- start class org.apache.hadoop.metrics2.sink.GraphiteSink -->
+  <class name="GraphiteSink" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.metrics2.MetricsSink"/>
+    <implements name="java.io.Closeable"/>
+    <constructor name="GraphiteSink"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.commons.configuration2.SubsetConfiguration"/>
+    </method>
+    <method name="putMetrics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="record" type="org.apache.hadoop.metrics2.MetricsRecord"/>
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A metrics sink that writes to a Graphite server]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.sink.GraphiteSink -->
+  <!-- start class org.apache.hadoop.metrics2.sink.RollingFileSystemSink -->
+  <class name="RollingFileSystemSink" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.metrics2.MetricsSink"/>
+    <implements name="java.io.Closeable"/>
+    <constructor name="RollingFileSystemSink"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an empty instance.  Required for reflection.]]>
+      </doc>
+    </constructor>
+    <constructor name="RollingFileSystemSink" type="long, long"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an instance for testing.
+
+ @param flushIntervalMillis the roll interval in millis
+ @param flushOffsetIntervalMillis the roll offset interval in millis]]>
+      </doc>
+    </constructor>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="metrics2Properties" type="org.apache.commons.configuration2.SubsetConfiguration"/>
+    </method>
+    <method name="getRollInterval" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Extract the roll interval from the configuration and return it in
+ milliseconds.
+
+ @return the roll interval in millis]]>
+      </doc>
+    </method>
+    <method name="updateFlushTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="now" type="java.util.Date"/>
+      <doc>
+      <![CDATA[Update the {@link #nextFlush} variable to the next flush time. Add
+ an integer number of flush intervals, preserving the initial random offset.
+
+ @param now the current time]]>
+      </doc>
+    </method>
+    <method name="setInitialFlushTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="now" type="java.util.Date"/>
+      <doc>
+      <![CDATA[Set the {@link #nextFlush} variable to the initial flush time. The initial
+ flush will be an integer number of flush intervals past the beginning of
+ the current hour and will have a random offset added, up to
+ {@link #rollOffsetIntervalMillis}. The initial flush will be a time in
+ the past from which future flush times can be calculated.
+
+ @param now the current time]]>
+      </doc>
+    </method>
+    <method name="putMetrics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="record" type="org.apache.hadoop.metrics2.MetricsRecord"/>
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="source" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="ignoreError" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="allowAppend" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="basePath" type="org.apache.hadoop.fs.Path"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rollIntervalMillis" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rollOffsetIntervalMillis" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="nextFlush" type="java.util.Calendar"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="forceFlush" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="hasFlushed" type="boolean"
+      transient="false" volatile="true"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="suppliedConf" type="org.apache.hadoop.conf.Configuration"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="suppliedFilesystem" type="org.apache.hadoop.fs.FileSystem"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<p>This class is a metrics sink that uses
+ {@link org.apache.hadoop.fs.FileSystem} to write the metrics logs.  Every
+ roll interval a new directory will be created under the path specified by the
+ <code>basepath</code> property. All metrics will be logged to a file in the
+ current interval's directory in a file named &lt;hostname&gt;.log, where
+ &lt;hostname&gt; is the name of the host on which the metrics logging
+ process is running. The base path is set by the
+ <code>&lt;prefix&gt;.sink.&lt;instance&gt;.basepath</code> property.  The
+ time zone used to create the current interval's directory name is GMT.  If
+ the <code>basepath</code> property isn't specified, it will default to
+ &quot;/tmp&quot;, which is the temp directory on whatever default file
+ system is configured for the cluster.</p>
+
+ <p>The <code>&lt;prefix&gt;.sink.&lt;instance&gt;.ignore-error</code>
+ property controls whether an exception is thrown when an error is encountered
+ writing a log file.  The default value is <code>true</code>.  When set to
+ <code>false</code>, file errors are quietly swallowed.</p>
+
+ <p>The <code>roll-interval</code> property sets the amount of time before
+ rolling the directory. The default value is 1 hour. The roll interval may
+ not be less than 1 minute. The property's value should be given as
+ <i>number unit</i>, where <i>number</i> is an integer value, and
+ <i>unit</i> is a valid unit.  Valid units are <i>minute</i>, <i>hour</i>,
+ and <i>day</i>.  The units are case insensitive and may be abbreviated or
+ plural. If no units are specified, hours are assumed. For example,
+ &quot;2&quot;, &quot;2h&quot;, &quot;2 hour&quot;, and
+ &quot;2 hours&quot; are all valid ways to specify two hours.</p>
+
+ <p>The <code>roll-offset-interval-millis</code> property sets the upper
+ bound on a random time interval (in milliseconds) that is used to delay
+ before the initial roll.  All subsequent rolls will happen an integer
+ number of roll intervals after the initial roll, hence retaining the original
+ offset. The purpose of this property is to insert some variance in the roll
+ times so that large clusters using this sink on every node don't cause a
+ performance impact on HDFS by rolling simultaneously.  The default value is
+ 30000 (30s).  When writing to HDFS, as a rule of thumb, the roll offset in
+ millis should be no less than the number of sink instances times 5.</p>
+
+ <p>The primary use of this class is for logging to HDFS.  As it uses
+ {@link org.apache.hadoop.fs.FileSystem} to access the target file system,
+ however, it can be used to write to the local file system, Amazon S3, or any
+ other supported file system.  The base path for the sink will determine the
+ file system used.  An unqualified path will write to the default file system
+ set by the configuration.</p>
+
+ <p>Not all file systems support the ability to append to files.  In file
+ systems without the ability to append to files, only one writer can write to
+ a file at a time.  To allow for concurrent writes from multiple daemons on a
+ single host, the <code>source</code> property is used to set unique headers
+ for the log files.  The property should be set to the name of
+ the source daemon, e.g. <i>namenode</i>.  The value of the
+ <code>source</code> property should typically be the same as the property's
+ prefix.  If this property is not set, the source is taken to be
+ <i>unknown</i>.</p>
+
+ <p>Instead of appending to an existing file, by default the sink
+ will create a new file with a suffix of &quot;.&lt;n&gt;&quot;, where
+ <i>n</i> is the next lowest integer that isn't already used in a file name,
+ similar to the Hadoop daemon logs.  NOTE: the file with the <b>highest</b>
+ sequence number is the <b>newest</b> file, unlike the Hadoop daemon logs.</p>
+
+ <p>For file systems that allow append, the sink supports appending to the
+ existing file instead. If the <code>allow-append</code> property is set to
+ true, the sink will instead append to the existing file on file systems that
+ support appends. By default, the <code>allow-append</code> property is
+ false.</p>
+
+ <p>Note that when writing to HDFS with <code>allow-append</code> set to true,
+ there is a minimum acceptable number of data nodes.  If the number of data
+ nodes drops below that minimum, the append will succeed, but reading the
+ data will fail with an IOException in the DataStreamer class.  The minimum
+ number of data nodes required for a successful append is generally 2 or
+ 3.</p>
+
+ <p>Note also that when writing to HDFS, the file size information is not
+ updated until the file is closed (at the end of the interval) even though
+ the data is being written successfully. This is a known HDFS limitation that
+ exists because of the performance cost of updating the metadata.  See
+ <a href="https://issues.apache.org/jira/browse/HDFS-5478">HDFS-5478</a>.</p>
+
+ <p>When using this sink in a secure (Kerberos) environment, two additional
+ properties must be set: <code>keytab-key</code> and
+ <code>principal-key</code>. <code>keytab-key</code> should contain the key by
+ which the keytab file can be found in the configuration, for example,
+ <code>yarn.nodemanager.keytab</code>. <code>principal-key</code> should
+ contain the key by which the principal can be found in the configuration,
+ for example, <code>yarn.nodemanager.principal</code>.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.sink.RollingFileSystemSink -->
+  <!-- start class org.apache.hadoop.metrics2.sink.StatsDSink -->
+  <class name="StatsDSink" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.metrics2.MetricsSink"/>
+    <implements name="java.io.Closeable"/>
+    <constructor name="StatsDSink"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.commons.configuration2.SubsetConfiguration"/>
+    </method>
+    <method name="putMetrics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="record" type="org.apache.hadoop.metrics2.MetricsRecord"/>
+    </method>
+    <method name="writeMetric"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="line" type="java.lang.String"/>
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A metrics sink that writes metrics to a StatsD daemon.
+ This sink will produce metrics of the form
+ '[hostname].servicename.context.name.metricname:value|type'
+ where hostname is optional. This is useful when sending to
+ a daemon that is running on the localhost and will add the
+ hostname to the metric (such as the
+ <a href="https://collectd.org/">CollectD</a> StatsD plugin).
+ <br>
+ To configure this plugin, you will need to add the following
+ entries to your hadoop-metrics2.properties file:
+ <br>
+ <pre>
+ *.sink.statsd.class=org.apache.hadoop.metrics2.sink.StatsDSink
+ [prefix].sink.statsd.server.host=
+ [prefix].sink.statsd.server.port=
+ [prefix].sink.statsd.skip.hostname=true|false (optional)
+ [prefix].sink.statsd.service.name=NameNode (name you want for service)
+ </pre>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.sink.StatsDSink -->
+</package>
+<package name="org.apache.hadoop.metrics2.sink.ganglia">
+</package>
+<package name="org.apache.hadoop.metrics2.source">
+</package>
+<package name="org.apache.hadoop.metrics2.util">
+  <!-- start class org.apache.hadoop.metrics2.util.MBeans -->
+  <class name="MBeans" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="register" return="javax.management.ObjectName"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="serviceName" type="java.lang.String"/>
+      <param name="nameName" type="java.lang.String"/>
+      <param name="theMbean" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Register the MBean using our standard MBeanName format
+ "hadoop:service={@literal <serviceName>,name=<nameName>}"
+ Where the {@literal <serviceName> and <nameName>} are the supplied
+ parameters.
+
+ @param serviceName
+ @param nameName
+ @param theMbean - the MBean to register
+ @return the name used to register the MBean]]>
+      </doc>
+    </method>
+    <method name="register" return="javax.management.ObjectName"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="serviceName" type="java.lang.String"/>
+      <param name="nameName" type="java.lang.String"/>
+      <param name="properties" type="java.util.Map"/>
+      <param name="theMbean" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Register the MBean using our standard MBeanName format
+ "hadoop:service={@literal <serviceName>,name=<nameName>}"
+ Where the {@literal <serviceName> and <nameName>} are the supplied
+ parameters.
+
+ @param serviceName
+ @param nameName
+ @param properties - Key value pairs to define additional JMX ObjectName
+                     properties.
+ @param theMbean    - the MBean to register
+ @return the name used to register the MBean]]>
+      </doc>
+    </method>
+    <method name="getMbeanNameService" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="objectName" type="javax.management.ObjectName"/>
+    </method>
+    <method name="getMbeanNameName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="objectName" type="javax.management.ObjectName"/>
+    </method>
+    <method name="unregister"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="mbeanName" type="javax.management.ObjectName"/>
+    </method>
+    <doc>
+    <![CDATA[This util class provides a method to register an MBean using
+ our standard naming convention as described in the doc
+  for {@link #register(String, String, Object)}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.util.MBeans -->
+  <!-- start class org.apache.hadoop.metrics2.util.MetricsCache -->
+  <class name="MetricsCache" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MetricsCache"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="MetricsCache" type="int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a metrics cache
+ @param maxRecsPerName  limit of the number records per record name]]>
+      </doc>
+    </constructor>
+    <method name="update" return="org.apache.hadoop.metrics2.util.MetricsCache.Record"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="mr" type="org.apache.hadoop.metrics2.MetricsRecord"/>
+      <param name="includingTags" type="boolean"/>
+      <doc>
+      <![CDATA[Update the cache and return the current cached record
+ @param mr the update record
+ @param includingTags cache tag values (for later lookup by name) if true
+ @return the updated cache record]]>
+      </doc>
+    </method>
+    <method name="update" return="org.apache.hadoop.metrics2.util.MetricsCache.Record"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="mr" type="org.apache.hadoop.metrics2.MetricsRecord"/>
+      <doc>
+      <![CDATA[Update the cache and return the current cache record
+ @param mr the update record
+ @return the updated cache record]]>
+      </doc>
+    </method>
+    <method name="get" return="org.apache.hadoop.metrics2.util.MetricsCache.Record"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="tags" type="java.util.Collection"/>
+      <doc>
+      <![CDATA[Get the cached record
+ @param name of the record
+ @param tags of the record
+ @return the cached record or null]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A metrics cache for sinks that don't support sparse updates.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.util.MetricsCache -->
+  <!-- start class org.apache.hadoop.metrics2.util.Servers -->
+  <class name="Servers" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="parse" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="specs" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+      <doc>
+      <![CDATA[Parses a space and/or comma separated sequence of server specifications
+ of the form <i>hostname</i> or <i>hostname:port</i>.  If
+ the specs string is null, defaults to localhost:defaultPort.
+
+ @param specs   server specs (see description)
+ @param defaultPort the default port if not specified
+ @return a list of InetSocketAddress objects.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Helpers to handle server addresses]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.metrics2.util.Servers -->
+</package>
+<package name="org.apache.hadoop.net">
+  <!-- start class org.apache.hadoop.net.AbstractDNSToSwitchMapping -->
+  <class name="AbstractDNSToSwitchMapping" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.net.DNSToSwitchMapping"/>
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="AbstractDNSToSwitchMapping"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an unconfigured instance]]>
+      </doc>
+    </constructor>
+    <constructor name="AbstractDNSToSwitchMapping" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an instance, caching the configuration file.
+ This constructor does not call {@link #setConf(Configuration)}; if
+ a subclass extracts information in that method, it must call it explicitly.
+ @param conf the configuration]]>
+      </doc>
+    </constructor>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="isSingleSwitch" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Predicate that indicates that the switch mapping is known to be
+ single-switch. The base class returns false: it assumes all mappings are
+ multi-rack. Subclasses may override this with methods that are more aware
+ of their topologies.
+
+ <p>
+
+ This method is used when parts of Hadoop need to know whether to apply
+ single rack vs multi-rack policies, such as during block placement.
+ Such algorithms behave differently if they are on multi-switch systems.
+ </p>
+
+ @return true if the mapping thinks that it is on a single switch]]>
+      </doc>
+    </method>
+    <method name="getSwitchMap" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a copy of the map (for diagnostics)
+ @return a clone of the map or null for none known]]>
+      </doc>
+    </method>
+    <method name="dumpTopology" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Generate a string listing the switch mapping implementation,
+ the mapping for every known node and the number of nodes and
+ unique switches known about -each entry to a separate line.
+ @return a string that can be presented to the ops team or used in
+ debug messages.]]>
+      </doc>
+    </method>
+    <method name="isSingleSwitchByScriptPolicy" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="isMappingSingleSwitch" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="mapping" type="org.apache.hadoop.net.DNSToSwitchMapping"/>
+      <doc>
+      <![CDATA[Query for a {@link DNSToSwitchMapping} instance being on a single
+ switch.
+ <p>
+ This predicate simply assumes that all mappings not derived from
+ this class are multi-switch.
+ @param mapping the mapping to query
+ @return true if the base class says it is single switch, or the mapping
+ is not derived from this class.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is a base class for DNS to Switch mappings. <p> It is not mandatory to
+ derive {@link DNSToSwitchMapping} implementations from it, but it is strongly
+ recommended, as it makes it easy for the Hadoop developers to add new methods
+ to this base class that are automatically picked up by all implementations.
+ <p>
+
+ This class does not extend the <code>Configured</code>
+ base class, and should not be changed to do so, as it causes problems
+ for subclasses. The constructor of the <code>Configured</code> calls
+ the  {@link #setConf(Configuration)} method, which will call into the
+ subclasses before they have been fully constructed.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.net.AbstractDNSToSwitchMapping -->
+  <!-- start class org.apache.hadoop.net.CachedDNSToSwitchMapping -->
+  <class name="CachedDNSToSwitchMapping" extends="org.apache.hadoop.net.AbstractDNSToSwitchMapping"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CachedDNSToSwitchMapping" type="org.apache.hadoop.net.DNSToSwitchMapping"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[cache a raw DNS mapping
+ @param rawMapping the raw mapping to cache]]>
+      </doc>
+    </constructor>
+    <method name="resolve" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="names" type="java.util.List"/>
+    </method>
+    <method name="getSwitchMap" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the (host x switch) map.
+ @return a copy of the cached map of hosts to rack]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isSingleSwitch" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Delegate the switch topology query to the raw mapping, via
+ {@link AbstractDNSToSwitchMapping#isMappingSingleSwitch(DNSToSwitchMapping)}
+ @return true iff the raw mapper is considered single-switch.]]>
+      </doc>
+    </method>
+    <method name="reloadCachedMappings"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="reloadCachedMappings"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="names" type="java.util.List"/>
+    </method>
+    <field name="rawMapping" type="org.apache.hadoop.net.DNSToSwitchMapping"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The uncached mapping]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A cached implementation of DNSToSwitchMapping that takes a
+ raw DNSToSwitchMapping and stores the resolved network location in 
+ a cache. The following calls to a resolved network location
+ will get its location from the cache.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.net.CachedDNSToSwitchMapping -->
+  <!-- start class org.apache.hadoop.net.ConnectTimeoutException -->
+  <class name="ConnectTimeoutException" extends="java.net.SocketTimeoutException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ConnectTimeoutException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Thrown by {@link NetUtils#connect(java.net.Socket, java.net.SocketAddress, int)}
+ if it times out while connecting to the remote host.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.net.ConnectTimeoutException -->
+  <!-- start interface org.apache.hadoop.net.DNSToSwitchMapping -->
+  <interface name="DNSToSwitchMapping"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="resolve" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="names" type="java.util.List"/>
+      <doc>
+      <![CDATA[Resolves a list of DNS-names/IP-addresses and returns back a list of
+ switch information (network paths). One-to-one correspondence must be 
+ maintained between the elements in the lists. 
+ Consider an element in the argument list - x.y.com. The switch information
+ that is returned must be a network path of the form /foo/rack, 
+ where / is the root, and 'foo' is the switch where 'rack' is connected.
+ Note the hostname/ip-address is not part of the returned path.
+ The network topology of the cluster would determine the number of
+ components in the network path.
+ <p>
+
+ If a name cannot be resolved to a rack, the implementation
+ should return {@link NetworkTopology#DEFAULT_RACK}. This
+ is what the bundled implementations do, though it is not a formal requirement.
+
+ @param names the list of hosts to resolve (can be empty)
+ @return list of resolved network paths.
+ If <i>names</i> is empty, the returned list is also empty]]>
+      </doc>
+    </method>
+    <method name="reloadCachedMappings"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reload all of the cached mappings.
+
+ If there is a cache, this method will clear it, so that future accesses
+ will get a chance to see the new data.]]>
+      </doc>
+    </method>
+    <method name="reloadCachedMappings"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="names" type="java.util.List"/>
+      <doc>
+      <![CDATA[Reload cached mappings on specific nodes.
+
+ If there is a cache on these nodes, this method will clear it, so that 
+ future accesses will see updated data.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[An interface that must be implemented to allow pluggable
+ DNS-name/IP-address to RackID resolvers.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.net.DNSToSwitchMapping -->
+  <!-- start class org.apache.hadoop.net.ScriptBasedMapping -->
+  <class name="ScriptBasedMapping" extends="org.apache.hadoop.net.CachedDNSToSwitchMapping"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ScriptBasedMapping"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an instance with the default configuration.
+ <p>
+ Calling {@link #setConf(Configuration)} will trigger a
+ re-evaluation of the configuration settings and so be used to
+ set up the mapping script.]]>
+      </doc>
+    </constructor>
+    <constructor name="ScriptBasedMapping" type="org.apache.hadoop.net.DNSToSwitchMapping"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an instance from the given raw mapping
+ @param rawMap the raw DNSToSwitchMapping to cache]]>
+      </doc>
+    </constructor>
+    <constructor name="ScriptBasedMapping" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an instance from the given configuration
+ @param conf configuration]]>
+      </doc>
+    </constructor>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[{@inheritDoc}
+ <p>
+ This will get called in the superclass constructor, so a check is needed
+ to ensure that the raw mapping is defined before trying to relay a null
+ configuration.
+ @param conf]]>
+      </doc>
+    </method>
+    <field name="NO_SCRIPT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Text used in the {@link #toString()} method if there is no string
+ {@value}]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[This class implements the {@link DNSToSwitchMapping} interface using a 
+ script configured via the
+ {@link CommonConfigurationKeys#NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY} option.
+ <p>
+ It contains a static class <code>RawScriptBasedMapping</code> that performs
+ the work: reading the configuration parameters, executing any defined
+ script, handling errors and such like. The outer
+ class extends {@link CachedDNSToSwitchMapping} to cache the delegated
+ queries.
+ <p>
+ This DNS mapper's {@link #isSingleSwitch()} predicate returns
+ true if and only if a script is defined.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.net.ScriptBasedMapping -->
+  <!-- start class org.apache.hadoop.net.SocksSocketFactory -->
+  <class name="SocksSocketFactory" extends="javax.net.SocketFactory"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="SocksSocketFactory"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default empty constructor (for use with the reflection API).]]>
+      </doc>
+    </constructor>
+    <constructor name="SocksSocketFactory" type="java.net.Proxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor with a supplied Proxy
+ 
+ @param proxy the proxy to use to create sockets]]>
+      </doc>
+    </constructor>
+    <method name="createSocket" return="java.net.Socket"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createSocket" return="java.net.Socket"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="port" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createSocket" return="java.net.Socket"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="port" type="int"/>
+      <param name="localHostAddr" type="java.net.InetAddress"/>
+      <param name="localPort" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createSocket" return="java.net.Socket"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="host" type="java.lang.String"/>
+      <param name="port" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+    </method>
+    <method name="createSocket" return="java.net.Socket"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="host" type="java.lang.String"/>
+      <param name="port" type="int"/>
+      <param name="localHostAddr" type="java.net.InetAddress"/>
+      <param name="localPort" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <doc>
+    <![CDATA[Specialized SocketFactory to create sockets with a SOCKS proxy]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.net.SocksSocketFactory -->
+  <!-- start class org.apache.hadoop.net.StandardSocketFactory -->
+  <class name="StandardSocketFactory" extends="javax.net.SocketFactory"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StandardSocketFactory"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default empty constructor (for use with the reflection API).]]>
+      </doc>
+    </constructor>
+    <method name="createSocket" return="java.net.Socket"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createSocket" return="java.net.Socket"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="port" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createSocket" return="java.net.Socket"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="port" type="int"/>
+      <param name="localHostAddr" type="java.net.InetAddress"/>
+      <param name="localPort" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createSocket" return="java.net.Socket"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="host" type="java.lang.String"/>
+      <param name="port" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+    </method>
+    <method name="createSocket" return="java.net.Socket"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="host" type="java.lang.String"/>
+      <param name="port" type="int"/>
+      <param name="localHostAddr" type="java.net.InetAddress"/>
+      <param name="localPort" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Specialized SocketFactory to create standard (direct, non-proxied) sockets.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.net.StandardSocketFactory -->
+  <!-- start class org.apache.hadoop.net.TableMapping -->
+  <class name="TableMapping" extends="org.apache.hadoop.net.CachedDNSToSwitchMapping"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TableMapping"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="reloadCachedMappings"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[<p>
+ Simple {@link DNSToSwitchMapping} implementation that reads a 2 column text
+ file. The columns are separated by whitespace. The first column is a DNS or
+ IP address and the second column specifies the rack where the address maps.
+ </p>
+ <p>
+ This class uses the configuration parameter {@code
+ net.topology.table.file.name} to locate the mapping file.
+ </p>
+ <p>
+ Calls to {@link #resolve(List)} will look up the address as defined in the
+ mapping file. If no entry corresponding to the address is found, the value
+ {@code /default-rack} is returned.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.net.TableMapping -->
+</package>
+<package name="org.apache.hadoop.net.unix">
+</package>
+<package name="org.apache.hadoop.security">
+  <!-- start class org.apache.hadoop.security.AccessControlException -->
+  <class name="AccessControlException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AccessControlException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor is needed for unwrapping from 
+ {@link org.apache.hadoop.ipc.RemoteException}.]]>
+      </doc>
+    </constructor>
+    <constructor name="AccessControlException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs an {@link AccessControlException}
+ with the specified detail message.
+ @param s the detail message.]]>
+      </doc>
+    </constructor>
+    <constructor name="AccessControlException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a new exception with the specified cause and a detail
+ message of <tt>(cause==null ? null : cause.toString())</tt> (which
+ typically contains the class and detail message of <tt>cause</tt>).
+ @param  cause the cause (which is saved for later retrieval by the
+         {@link #getCause()} method).  (A <tt>null</tt> value is
+         permitted, and indicates that the cause is nonexistent or
+         unknown.)]]>
+      </doc>
+    </constructor>
+    <doc>
+    <![CDATA[An exception class for access control related issues.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.AccessControlException -->
+  <!-- start class org.apache.hadoop.security.Credentials -->
+  <class name="Credentials" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="Credentials"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an empty credentials instance.]]>
+      </doc>
+    </constructor>
+    <constructor name="Credentials" type="org.apache.hadoop.security.Credentials"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a copy of the given credentials.
+ @param credentials to copy]]>
+      </doc>
+    </constructor>
+    <method name="getToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="alias" type="org.apache.hadoop.io.Text"/>
+      <doc>
+      <![CDATA[Returns the Token object for the alias.
+ @param alias the alias for the Token
+ @return token for this alias]]>
+      </doc>
+    </method>
+    <method name="addToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="alias" type="org.apache.hadoop.io.Text"/>
+      <param name="t" type="org.apache.hadoop.security.token.Token"/>
+      <doc>
+      <![CDATA[Add a token in the storage (in memory).
+ @param alias the alias for the key
+ @param t the token object]]>
+      </doc>
+    </method>
+    <method name="getAllTokens" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return all the tokens in the in-memory map.]]>
+      </doc>
+    </method>
+    <method name="getTokenMap" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns an unmodifiable version of the full map of aliases to Tokens.]]>
+      </doc>
+    </method>
+    <method name="numberOfTokens" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return number of Tokens in the in-memory map]]>
+      </doc>
+    </method>
+    <method name="getSecretKey" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="alias" type="org.apache.hadoop.io.Text"/>
+      <doc>
+      <![CDATA[Returns the key bytes for the alias.
+ @param alias the alias for the key
+ @return key for this alias]]>
+      </doc>
+    </method>
+    <method name="numberOfSecretKeys" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return number of keys in the in-memory map]]>
+      </doc>
+    </method>
+    <method name="addSecretKey"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="alias" type="org.apache.hadoop.io.Text"/>
+      <param name="key" type="byte[]"/>
+      <doc>
+      <![CDATA[Set the key for an alias.
+ @param alias the alias for the key
+ @param key the key bytes]]>
+      </doc>
+    </method>
+    <method name="removeSecretKey"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="alias" type="org.apache.hadoop.io.Text"/>
+      <doc>
+      <![CDATA[Remove the key for a given alias.
+ @param alias the alias for the key]]>
+      </doc>
+    </method>
+    <method name="getAllSecretKeys" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return all the secret key entries in the in-memory map.]]>
+      </doc>
+    </method>
+    <method name="getSecretKeyMap" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns an unmodifiable version of the full map of aliases to secret keys.]]>
+      </doc>
+    </method>
+    <method name="readTokenStorageFile" return="org.apache.hadoop.security.Credentials"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filename" type="org.apache.hadoop.fs.Path"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Convenience method for reading a token storage file and loading its Tokens.
+ @param filename
+ @param conf
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readTokenStorageFile" return="org.apache.hadoop.security.Credentials"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filename" type="java.io.File"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Convenience method for reading a token storage file and loading its Tokens.
+ @param filename
+ @param conf
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readTokenStorageStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Convenience method for reading a token from a DataInputStream.]]>
+      </doc>
+    </method>
+    <method name="writeTokenStorageToStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="os" type="java.io.DataOutputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeTokenStorageToStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="os" type="java.io.DataOutputStream"/>
+      <param name="format" type="org.apache.hadoop.security.Credentials.SerializedFormat"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeTokenStorageFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filename" type="org.apache.hadoop.fs.Path"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="writeTokenStorageFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filename" type="org.apache.hadoop.fs.Path"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="format" type="org.apache.hadoop.security.Credentials.SerializedFormat"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Stores all the keys to DataOutput.
+ @param out
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Loads all the keys.
+ @param in
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="addAll"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.security.Credentials"/>
+      <doc>
+      <![CDATA[Copy all of the credentials from one credential object into another.
+ Existing secrets and tokens are overwritten.
+ @param other the credentials to copy]]>
+      </doc>
+    </method>
+    <method name="mergeAll"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.security.Credentials"/>
+      <doc>
+      <![CDATA[Copy all of the credentials from one credential object into another.
+ Existing secrets and tokens are not overwritten.
+ @param other the credentials to copy]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A class that provides the facilities of reading and writing
+ secret keys and Tokens.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.Credentials -->
+  <!-- start interface org.apache.hadoop.security.GroupMappingServiceProvider -->
+  <interface name="GroupMappingServiceProvider"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getGroups" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get all various group memberships of a given user.
+ Returns EMPTY list in case of non-existing user
+ @param user User's name
+ @return group memberships of user
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="cacheGroupsRefresh"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Refresh the cache of groups and user mapping
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="cacheGroupsAdd"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="groups" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Caches the group user information
+ @param groups list of groups to add to cache
+ @throws IOException]]>
+      </doc>
+    </method>
+    <field name="GROUP_MAPPING_CONFIG_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An interface for the implementation of a user-to-groups mapping service
+ used by {@link Groups}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.security.GroupMappingServiceProvider -->
+  <!-- start interface org.apache.hadoop.security.IdMappingServiceProvider -->
+  <interface name="IdMappingServiceProvider"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getUid" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getGid" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getUserName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uid" type="int"/>
+      <param name="unknown" type="java.lang.String"/>
+    </method>
+    <method name="getGroupName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="gid" type="int"/>
+      <param name="unknown" type="java.lang.String"/>
+    </method>
+    <method name="getUidAllowingUnknown" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+    </method>
+    <method name="getGidAllowingUnknown" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[An interface for the implementation of {@literal <}userId,
+ userName{@literal >} mapping and {@literal <}groupId, groupName{@literal >}
+ mapping.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.security.IdMappingServiceProvider -->
+  <!-- start class org.apache.hadoop.security.KerberosAuthException -->
+  <class name="KerberosAuthException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="KerberosAuthException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="KerberosAuthException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="KerberosAuthException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setUser"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="u" type="java.lang.String"/>
+    </method>
+    <method name="setPrincipal"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="java.lang.String"/>
+    </method>
+    <method name="setKeytabFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="k" type="java.lang.String"/>
+    </method>
+    <method name="setTicketCacheFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="t" type="java.lang.String"/>
+    </method>
+    <method name="getInitialMessage" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return The initial message, or null if not set.]]>
+      </doc>
+    </method>
+    <method name="getKeytabFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return The keytab file path, or null if not set.]]>
+      </doc>
+    </method>
+    <method name="getPrincipal" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return The principal, or null if not set.]]>
+      </doc>
+    </method>
+    <method name="getTicketCacheFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return The ticket cache file path, or null if not set.]]>
+      </doc>
+    </method>
+    <method name="getUser" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return The user, or null if not set.]]>
+      </doc>
+    </method>
+    <method name="getMessage" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Thrown when {@link UserGroupInformation} failed with an unrecoverable error,
+ such as failure in kerberos login/logout, invalid subject etc.
+
+ Caller should not retry when catching this exception.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.KerberosAuthException -->
+  <!-- start class org.apache.hadoop.security.SecurityUtil -->
+  <class name="SecurityUtil" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="setConfiguration"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="isOriginalTGT" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="ticket" type="javax.security.auth.kerberos.KerberosTicket"/>
+      <doc>
+      <![CDATA[Check whether the server principal is the TGS's principal
+ @param ticket the original TGT (the ticket that is obtained when a 
+ kinit is done)
+ @return true or false]]>
+      </doc>
+    </method>
+    <method name="getServerPrincipal" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="principalConfig" type="java.lang.String"/>
+      <param name="hostname" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Convert Kerberos principal name pattern to valid Kerberos principal
+ names. It replaces hostname pattern with hostname, which should be
+ fully-qualified domain name. If hostname is null or "0.0.0.0", it uses
+ dynamically looked-up fqdn of the current host instead.
+ 
+ @param principalConfig
+          the Kerberos principal name conf value to convert
+ @param hostname
+          the fully-qualified domain name used for substitution
+ @return converted Kerberos principal name
+ @throws IOException if the client address cannot be determined]]>
+      </doc>
+    </method>
+    <method name="getServerPrincipal" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="principalConfig" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Convert Kerberos principal name pattern to valid Kerberos principal names.
+ This method is similar to {@link #getServerPrincipal(String, String)},
+ except 1) the reverse DNS lookup from addr to hostname is done only when
+ necessary, 2) param addr can't be null (no default behavior of using local
+ hostname when addr is null).
+ 
+ @param principalConfig
+          Kerberos principal name pattern to convert
+ @param addr
+          InetAddress of the host used for substitution
+ @return converted Kerberos principal name
+ @throws IOException if the client address cannot be determined]]>
+      </doc>
+    </method>
+    <method name="login"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="keytabFileKey" type="java.lang.String"/>
+      <param name="userNameKey" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Login as a principal specified in config. Substitute $host in
+ user's Kerberos principal name with a dynamically looked-up fully-qualified
+ domain name of the current host.
+ 
+ @param conf
+          conf to use
+ @param keytabFileKey
+          the key to look for keytab file in conf
+ @param userNameKey
+          the key to look for user's Kerberos principal name in conf
+ @throws IOException if login fails]]>
+      </doc>
+    </method>
+    <method name="login"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="keytabFileKey" type="java.lang.String"/>
+      <param name="userNameKey" type="java.lang.String"/>
+      <param name="hostname" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Login as a principal specified in config. Substitute $host in user's Kerberos principal 
+ name with hostname. If non-secure mode - return. If no keytab available -
+ bail out with an exception
+ 
+ @param conf
+          conf to use
+ @param keytabFileKey
+          the key to look for keytab file in conf
+ @param userNameKey
+          the key to look for user's Kerberos principal name in conf
+ @param hostname
+          hostname to use for substitution
+ @throws IOException if the config doesn't specify a keytab]]>
+      </doc>
+    </method>
+    <method name="buildDTServiceName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <param name="defPort" type="int"/>
+      <doc>
+      <![CDATA[create the service name for a Delegation token
+ @param uri of the service
+ @param defPort is used if the uri lacks a port
+ @return the token service, or null if no authority
+ @see #buildTokenService(InetSocketAddress)]]>
+      </doc>
+    </method>
+    <method name="getHostFromPrincipal" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="principalName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the host name from the principal name of format {@literal <}service
+ {@literal >}/host@realm.
+ @param principalName principal name of format as described above
+ @return host name if the the string conforms to the above format, else null]]>
+      </doc>
+    </method>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Look up the KerberosInfo for a given protocol. It searches all known
+ SecurityInfo providers.
+ @param protocol the protocol class to get the information for
+ @param conf configuration object
+ @return the KerberosInfo or null if it has no KerberosInfo defined]]>
+      </doc>
+    </method>
+    <method name="getClientPrincipal" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Look up the client principal for a given protocol. It searches all known
+ SecurityInfo providers.
+ @param protocol the protocol class to get the information for
+ @param conf configuration object
+ @return client principal or null if it has no client principal defined.]]>
+      </doc>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Look up the TokenInfo for a given protocol. It searches all known
+ SecurityInfo providers.
+ @param protocol The protocol class to get the information for.
+ @param conf Configuration object
+ @return the TokenInfo or null if it has no KerberosInfo defined]]>
+      </doc>
+    </method>
+    <method name="getTokenServiceAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <doc>
+      <![CDATA[Decode the given token's service field into an InetAddress
+ @param token from which to obtain the service
+ @return InetAddress for the service]]>
+      </doc>
+    </method>
+    <method name="setTokenService"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <param name="addr" type="java.net.InetSocketAddress"/>
+      <doc>
+      <![CDATA[Set the given token's service to the format expected by the RPC client 
+ @param token a delegation token
+ @param addr the socket for the rpc connection]]>
+      </doc>
+    </method>
+    <method name="buildTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="addr" type="java.net.InetSocketAddress"/>
+      <doc>
+      <![CDATA[Construct the service key for a token
+ @param addr InetSocketAddress of remote connection with a token
+ @return "ip:port" or "host:port" depending on the value of
+          hadoop.security.token.service.use_ip]]>
+      </doc>
+    </method>
+    <method name="buildTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <doc>
+      <![CDATA[Construct the service key for a token
+ @param uri of remote connection with a token
+ @return "ip:port" or "host:port" depending on the value of
+          hadoop.security.token.service.use_ip]]>
+      </doc>
+    </method>
+    <method name="doAsLoginUserOrFatal" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="action" type="java.security.PrivilegedAction"/>
+      <doc>
+      <![CDATA[Perform the given action as the daemon's login user. If the login
+ user cannot be determined, this will log a FATAL error and exit
+ the whole JVM.]]>
+      </doc>
+    </method>
+    <method name="doAsLoginUser" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="action" type="java.security.PrivilegedExceptionAction"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Perform the given action as the daemon's login user. If an
+ InterruptedException is thrown, it is converted to an IOException.
+
+ @param action the action to perform
+ @return the result of the action
+ @throws IOException in the event of error]]>
+      </doc>
+    </method>
+    <method name="doAsCurrentUser" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="action" type="java.security.PrivilegedExceptionAction"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Perform the given action as the daemon's current user. If an
+ InterruptedException is thrown, it is converted to an IOException.
+
+ @param action the action to perform
+ @return the result of the action
+ @throws IOException in the event of error]]>
+      </doc>
+    </method>
+    <method name="getAuthenticationMethod" return="org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="setAuthenticationMethod"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="authenticationMethod" type="org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="isPrivilegedPort" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="port" type="int"/>
+    </method>
+    <method name="getZKAuthInfos" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="configKey" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Utility method to fetch ZK auth info from the configuration.
+ @throws java.io.IOException if the Zookeeper ACLs configuration file
+ cannot be read
+ @throws ZKUtil.BadAuthFormatException if the auth format is invalid]]>
+      </doc>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HOSTNAME_PATTERN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FAILED_TO_GET_UGI_MSG_HEADER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Security Utils.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.SecurityUtil -->
+  <!-- start class org.apache.hadoop.security.UserGroupInformation -->
+  <class name="UserGroupInformation" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="setShouldRenewImmediatelyForTests"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="immediate" type="boolean"/>
+      <doc>
+      <![CDATA[For the purposes of unit tests, we want to test login
+ from keytab and don't want to wait until the renew
+ window (controlled by TICKET_RENEW_WINDOW).
+ @param immediate true if we should login without waiting for ticket window]]>
+      </doc>
+    </method>
+    <method name="reattachMetrics"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reattach the class's metrics to a new metric system.]]>
+      </doc>
+    </method>
+    <method name="isInitialized" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setConfiguration"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Set the static configuration for UGI.
+ In particular, set the security authentication mechanism and the
+ group look up service.
+ @param conf the configuration to use]]>
+      </doc>
+    </method>
+    <method name="isSecurityEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Determine if UserGroupInformation is using Kerberos to determine
+ user identities or is relying on simple authentication
+ 
+ @return true if UGI is working in a secure environment]]>
+      </doc>
+    </method>
+    <method name="hasKerberosCredentials" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[checks if logged in using kerberos
+ @return true if the subject logged via keytab or has a Kerberos TGT]]>
+      </doc>
+    </method>
+    <method name="getCurrentUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the current user, including any doAs in the current stack.
+ @return the current user
+ @throws IOException if login fails]]>
+      </doc>
+    </method>
+    <method name="getBestUGI" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ticketCachePath" type="java.lang.String"/>
+      <param name="user" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Find the most appropriate UserGroupInformation to use
+
+ @param ticketCachePath    The Kerberos ticket cache path, or NULL
+                           if none is specfied
+ @param user               The user name, or NULL if none is specified.
+
+ @return                   The most appropriate UserGroupInformation]]>
+      </doc>
+    </method>
+    <method name="getUGIFromTicketCache" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ticketCache" type="java.lang.String"/>
+      <param name="user" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a UserGroupInformation from a Kerberos ticket cache.
+ 
+ @param user                The principal name to load from the ticket
+                            cache
+ @param ticketCache     the path to the ticket cache file
+
+ @throws IOException        if the kerberos login fails]]>
+      </doc>
+    </method>
+    <method name="getUGIFromSubject" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="subject" type="javax.security.auth.Subject"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a UserGroupInformation from a Subject with Kerberos principal.
+
+ @param subject             The KerberosPrincipal to use in UGI.
+                            The creator of subject is responsible for
+                            renewing credentials.
+
+ @throws IOException
+ @throws KerberosAuthException if the kerberos login fails]]>
+      </doc>
+    </method>
+    <method name="getLoginUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the currently logged in user.  If no explicit login has occurred,
+ the user will automatically be logged in with either kerberos credentials
+ if available, or as the local OS user, based on security settings.
+ @return the logged in user
+ @throws IOException if login fails]]>
+      </doc>
+    </method>
+    <method name="trimLoginMethod" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="userName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[remove the login method that is followed by a space from the username
+ e.g. "jack (auth:SIMPLE)" {@literal ->} "jack"
+
+ @param userName
+ @return userName without login method]]>
+      </doc>
+    </method>
+    <method name="loginUserFromSubject"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="subject" type="javax.security.auth.Subject"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Log in a user using the given subject
+ @param subject the subject to use when logging in a user, or null to
+ create a new subject.
+
+ If subject is not null, the creator of subject is responsible for renewing
+ credentials.
+
+ @throws IOException if login fails]]>
+      </doc>
+    </method>
+    <method name="isFromKeytab" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is this user logged in from a keytab file managed by the UGI?
+ @return true if the credentials are from a keytab file.]]>
+      </doc>
+    </method>
+    <method name="loginUserFromKeytab"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <param name="path" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Log a user in from a keytab file. Loads a user identity from a keytab
+ file and logs them in. They become the currently logged-in user.
+ @param user the principal name to load from the keytab
+ @param path the path to the keytab file
+ @throws IOException
+ @throws KerberosAuthException if it's a kerberos login exception.]]>
+      </doc>
+    </method>
+    <method name="logoutUserFromKeytab"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Log the current user out who previously logged in using keytab.
+ This method assumes that the user logged in by calling
+ {@link #loginUserFromKeytab(String, String)}.
+
+ @throws IOException
+ @throws KerberosAuthException if a failure occurred in logout,
+ or if the user did not log in by invoking loginUserFromKeyTab() before.]]>
+      </doc>
+    </method>
+    <method name="checkTGTAndReloginFromKeytab"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Re-login a user from keytab if TGT is expired or is close to expiry.
+ 
+ @throws IOException
+ @throws KerberosAuthException if it's a kerberos login exception.]]>
+      </doc>
+    </method>
+    <method name="reloginFromKeytab"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Re-Login a user in from a keytab file. Loads a user identity from a keytab
+ file and logs them in. They become the currently logged-in user. This
+ method assumes that {@link #loginUserFromKeytab(String, String)} had
+ happened already.
+ The Subject field of this UserGroupInformation object is updated to have
+ the new credentials.
+ @throws IOException
+ @throws KerberosAuthException on a failure]]>
+      </doc>
+    </method>
+    <method name="forceReloginFromKeytab"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Force re-Login a user in from a keytab file irrespective of the last login
+ time. Loads a user identity from a keytab file and logs them in. They
+ become the currently logged-in user. This method assumes that
+ {@link #loginUserFromKeytab(String, String)} had happened already. The
+ Subject field of this UserGroupInformation object is updated to have the
+ new credentials.
+
+ @throws IOException
+ @throws KerberosAuthException on a failure]]>
+      </doc>
+    </method>
+    <method name="reloginFromTicketCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Re-Login a user in from the ticket cache.  This
+ method assumes that login had happened already.
+ The Subject field of this UserGroupInformation object is updated to have
+ the new credentials.
+ @throws IOException
+ @throws KerberosAuthException on a failure]]>
+      </doc>
+    </method>
+    <method name="loginUserFromKeytabAndReturnUGI" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <param name="path" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Log a user in from a keytab file. Loads a user identity from a keytab
+ file and logs them in. This new user does not affect the currently
+ logged-in user.
+ @param user the principal name to load from the keytab
+ @param path the path to the keytab file
+ @throws IOException if the keytab file can't be read]]>
+      </doc>
+    </method>
+    <method name="isLoginKeytabBased" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Did the login happen via keytab
+ @return true or false]]>
+      </doc>
+    </method>
+    <method name="isLoginTicketBased" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Did the login happen via ticket cache
+ @return true or false]]>
+      </doc>
+    </method>
+    <method name="createRemoteUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create a user from a login name. It is intended to be used for remote
+ users in RPC, since it won't have any credentials.
+ @param user the full user principal name, must not be empty or null
+ @return the UserGroupInformation for the remote user.]]>
+      </doc>
+    </method>
+    <method name="createRemoteUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <param name="authMethod" type="org.apache.hadoop.security.SaslRpcServer.AuthMethod"/>
+      <doc>
+      <![CDATA[Create a user from a login name. It is intended to be used for remote
+ users in RPC, since it won't have any credentials.
+ @param user the full user principal name, must not be empty or null
+ @return the UserGroupInformation for the remote user.]]>
+      </doc>
+    </method>
+    <method name="createProxyUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <param name="realUser" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <doc>
+      <![CDATA[Create a proxy user using username of the effective user and the ugi of the
+ real user.
+ @param user
+ @param realUser
+ @return proxyUser ugi]]>
+      </doc>
+    </method>
+    <method name="getRealUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[get RealUser (vs. EffectiveUser)
+ @return realUser running over proxy user]]>
+      </doc>
+    </method>
+    <method name="createUserForTesting" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <param name="userGroups" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Create a UGI for testing HDFS and MapReduce
+ @param user the full user principal name
+ @param userGroups the names of the groups that the user belongs to
+ @return a fake user for running unit tests]]>
+      </doc>
+    </method>
+    <method name="createProxyUserForTesting" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <param name="realUser" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="userGroups" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Create a proxy user UGI for testing HDFS and MapReduce
+ 
+ @param user
+          the full user principal name for effective user
+ @param realUser
+          UGI of the real user
+ @param userGroups
+          the names of the groups that the user belongs to
+ @return a fake user for running unit tests]]>
+      </doc>
+    </method>
+    <method name="getShortUserName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user's login name.
+ @return the user's name up to the first '/' or '@'.]]>
+      </doc>
+    </method>
+    <method name="getPrimaryGroupName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getUserName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user's full principal name.
+ @return the user's full principal name.]]>
+      </doc>
+    </method>
+    <method name="addTokenIdentifier" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tokenId" type="org.apache.hadoop.security.token.TokenIdentifier"/>
+      <doc>
+      <![CDATA[Add a TokenIdentifier to this UGI. The TokenIdentifier has typically been
+ authenticated by the RPC layer as belonging to the user represented by this
+ UGI.
+ 
+ @param tokenId
+          tokenIdentifier to be added
+ @return true on successful add of new tokenIdentifier]]>
+      </doc>
+    </method>
+    <method name="getTokenIdentifiers" return="java.util.Set"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the set of TokenIdentifiers belonging to this UGI
+ 
+ @return the set of TokenIdentifiers belonging to this UGI]]>
+      </doc>
+    </method>
+    <method name="addToken" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <doc>
+      <![CDATA[Add a token to this UGI
+ 
+ @param token Token to be added
+ @return true on successful add of new token]]>
+      </doc>
+    </method>
+    <method name="addToken" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="alias" type="org.apache.hadoop.io.Text"/>
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <doc>
+      <![CDATA[Add a named token to this UGI
+ 
+ @param alias Name of the token
+ @param token Token to be added
+ @return true on successful add of new token]]>
+      </doc>
+    </method>
+    <method name="getTokens" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the collection of tokens associated with this user.
+ 
+ @return an unmodifiable collection of tokens associated with user]]>
+      </doc>
+    </method>
+    <method name="getCredentials" return="org.apache.hadoop.security.Credentials"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the tokens in credentials form associated with this user.
+ 
+ @return Credentials of tokens associated with this user]]>
+      </doc>
+    </method>
+    <method name="addCredentials"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="credentials" type="org.apache.hadoop.security.Credentials"/>
+      <doc>
+      <![CDATA[Add the given Credentials to this user.
+ @param credentials of tokens and secrets]]>
+      </doc>
+    </method>
+    <method name="getGroupNames" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the group names for this user. {@link #getGroups()} is less
+ expensive alternative when checking for a contained element.
+ @return the list of group names with the primary group first. If the command
+    fails, it returns an empty list.]]>
+      </doc>
+    </method>
+    <method name="getGroups" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the group names for this user.
+ @return the list of group names with the primary group first. If the command
+    fails, it returns an empty list.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the username.]]>
+      </doc>
+    </method>
+    <method name="setAuthenticationMethod"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="authMethod" type="org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod"/>
+      <doc>
+      <![CDATA[Sets the authentication method in the subject
+ 
+ @param authMethod]]>
+      </doc>
+    </method>
+    <method name="setAuthenticationMethod"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="authMethod" type="org.apache.hadoop.security.SaslRpcServer.AuthMethod"/>
+      <doc>
+      <![CDATA[Sets the authentication method in the subject
+ 
+ @param authMethod]]>
+      </doc>
+    </method>
+    <method name="getAuthenticationMethod" return="org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the authentication method from the subject
+ 
+ @return AuthenticationMethod in the subject, null if not present.]]>
+      </doc>
+    </method>
+    <method name="getRealAuthenticationMethod" return="org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the authentication method from the real user's subject.  If there
+ is no real user, return the given user's authentication method.
+ 
+ @return AuthenticationMethod in the subject, null if not present.]]>
+      </doc>
+    </method>
+    <method name="getRealAuthenticationMethod" return="org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <doc>
+      <![CDATA[Returns the authentication method of a ugi. If the authentication method is
+ PROXY, returns the authentication method of the real user.
+ 
+ @param ugi
+ @return AuthenticationMethod]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Compare the subjects to see if they are equal to each other.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the hash of the subject.]]>
+      </doc>
+    </method>
+    <method name="getSubject" return="javax.security.auth.Subject"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the underlying subject from this ugi.
+ @return the subject that represents this user.]]>
+      </doc>
+    </method>
+    <method name="doAs" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="action" type="java.security.PrivilegedAction"/>
+      <doc>
+      <![CDATA[Run the given action as the user.
+ @param <T> the return type of the run method
+ @param action the method to execute
+ @return the value from the run method]]>
+      </doc>
+    </method>
+    <method name="doAs" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="action" type="java.security.PrivilegedExceptionAction"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Run the given action as the user, potentially throwing an exception.
+ @param <T> the return type of the run method
+ @param action the method to execute
+ @return the value from the run method
+ @throws IOException if the action throws an IOException
+ @throws Error if the action throws an Error
+ @throws RuntimeException if the action throws a RuntimeException
+ @throws InterruptedException if the action throws an InterruptedException
+ @throws UndeclaredThrowableException if the action throws something else]]>
+      </doc>
+    </method>
+    <method name="logAllUserInfo"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Log all (current, real, login) UGI and token info into UGI debug log.
+ @param ugi - UGI
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[A test method to print out the current user's UGI.
+ @param args if there are two arguments, read the user from the keytab
+ and print it out.
+ @throws Exception]]>
+      </doc>
+    </method>
+    <field name="HADOOP_TOKEN_FILE_LOCATION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Environment variable pointing to the token cache file]]>
+      </doc>
+    </field>
+    <field name="HADOOP_TOKEN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Environment variable pointing to the base64 tokens.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[User and group information for Hadoop.
+ This class wraps around a JAAS Subject and provides methods to determine the
+ user's username and groups. It supports the Windows, Unix and Kerberos
+ login modules.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.UserGroupInformation -->
+  <!-- start class org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod -->
+  <class name="UserGroupInformation.AuthenticationMethod" extends="java.lang.Enum"
+    abstract="false"
+    static="true" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="getAuthMethod" return="org.apache.hadoop.security.SaslRpcServer.AuthMethod"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="authMethod" type="org.apache.hadoop.security.SaslRpcServer.AuthMethod"/>
+    </method>
+    <doc>
+    <![CDATA[existing types of authentications' methods]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod -->
+</package>
+<package name="org.apache.hadoop.security.alias">
+  <!-- start class org.apache.hadoop.security.alias.CredentialProvider -->
+  <class name="CredentialProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CredentialProvider"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="isTransient" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Indicates whether this provider represents a store
+ that is intended for transient use - such as the UserProvider
+ is. These providers are generally used to provide job access to
+ passwords rather than for long term storage.
+ @return true if transient, false otherwise]]>
+      </doc>
+    </method>
+    <method name="flush"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Ensures that any changes to the credentials are written to persistent
+ store.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getCredentialEntry" return="org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="alias" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the credential entry for a specific alias.
+ @param alias the name of a specific credential
+ @return the credentialEntry
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAliases" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the aliases for all credentials.
+ @return the list of alias names
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createCredentialEntry" return="org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="credential" type="char[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a new credential. The given alias must not already exist.
+ @param name the alias of the credential
+ @param credential the credential value for the alias.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="deleteCredentialEntry"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete the given credential.
+ @param name the alias of the credential to delete
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="needsPassword" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Does this provider require a password? This means that a password is
+ required for normal operation, and it has not been found through normal
+ means. If true, the password should be provided by the caller using
+ setPassword().
+ @return Whether or not the provider requires a password
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="noPasswordWarning" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If a password for the provider is needed, but is not provided, this will
+ return a warning and instructions for supplying said password to the
+ provider.
+ @return A warning and instructions for supplying the password]]>
+      </doc>
+    </method>
+    <method name="noPasswordError" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If a password for the provider is needed, but is not provided, this will
+ return an error message and instructions for supplying said password to
+ the provider.
+ @return An error message and instructions for supplying the password]]>
+      </doc>
+    </method>
+    <field name="CLEAR_TEXT_FALLBACK" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A provider of credentials or password for Hadoop applications. Provides an
+ abstraction to separate credential storage from users of them. It
+ is intended to support getting or storing passwords in a variety of ways,
+ including third party bindings.
+ 
+ <code>CredentialProvider</code> implementations must be thread safe.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.alias.CredentialProvider -->
+  <!-- start class org.apache.hadoop.security.alias.CredentialProviderFactory -->
+  <class name="CredentialProviderFactory" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CredentialProviderFactory"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createProvider" return="org.apache.hadoop.security.alias.CredentialProvider"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="providerName" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getProviders" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="CREDENTIAL_PROVIDER_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A factory to create a list of CredentialProvider based on the path given in a
+ Configuration. It uses a service loader interface to find the available
+ CredentialProviders and create them based on the list of URIs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.alias.CredentialProviderFactory -->
+</package>
+<package name="org.apache.hadoop.security.authentication.server">
+</package>
+<package name="org.apache.hadoop.security.authorize">
+  <!-- start class org.apache.hadoop.security.authorize.AccessControlList -->
+  <class name="AccessControlList" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="AccessControlList"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This constructor exists primarily for AccessControlList to be Writable.]]>
+      </doc>
+    </constructor>
+    <constructor name="AccessControlList" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a new ACL from a String representation of the same.
+ 
+ The String is a comma separated list of users and groups.
+ The user list comes first and is separated by a space followed 
+ by the group list, e.g. "user1,user2 group1,group2"
+ 
+ @param aclString String representation of the ACL]]>
+      </doc>
+    </constructor>
+    <constructor name="AccessControlList" type="java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a new ACL from String representation of users and groups
+ 
+ The arguments are comma separated lists
+ 
+ @param users comma separated list of users
+ @param groups comma separated list of groups]]>
+      </doc>
+    </constructor>
+    <method name="isAllAllowed" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="addUser"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add user to the names of users allowed for this service.
+ 
+ @param user
+          The user name]]>
+      </doc>
+    </method>
+    <method name="addGroup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add group to the names of groups allowed for this service.
+ 
+ @param group
+          The group name]]>
+      </doc>
+    </method>
+    <method name="removeUser"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Remove user from the names of users allowed for this service.
+ 
+ @param user
+          The user name]]>
+      </doc>
+    </method>
+    <method name="removeGroup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Remove group from the names of groups allowed for this service.
+ 
+ @param group
+          The group name]]>
+      </doc>
+    </method>
+    <method name="getUsers" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the names of users allowed for this service.
+ @return the set of user names. the set must not be modified.]]>
+      </doc>
+    </method>
+    <method name="getGroups" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the names of user groups allowed for this service.
+ @return the set of group names. the set must not be modified.]]>
+      </doc>
+    </method>
+    <method name="isUserInList" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <doc>
+      <![CDATA[Checks if a user represented by the provided {@link UserGroupInformation}
+ is a member of the Access Control List. If user was proxied and
+ USE_REAL_ACLS + the real user name is in the control list, then treat this
+ case as if user were in the ACL list.
+ @param ugi UserGroupInformation to check if contained in the ACL
+ @return true if ugi is member of the list or if USE_REAL_ACLS + real user
+ is in the list]]>
+      </doc>
+    </method>
+    <method name="isUserAllowed" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns descriptive way of users and groups that are part of this ACL.
+ Use {@link #getAclString()} to get the exact String that can be given to
+ the constructor of AccessControlList to create a new instance.]]>
+      </doc>
+    </method>
+    <method name="getAclString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the access control list as a String that can be used for building a
+ new instance by sending it to the constructor of {@link AccessControlList}.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Serializes the AccessControlList object]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Deserializes the AccessControlList object]]>
+      </doc>
+    </method>
+    <field name="WILDCARD_ACL_VALUE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="USE_REAL_ACLS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Class representing a configured access control list.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.authorize.AccessControlList -->
+  <!-- start class org.apache.hadoop.security.authorize.AuthorizationException -->
+  <class name="AuthorizationException" extends="org.apache.hadoop.security.AccessControlException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AuthorizationException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AuthorizationException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AuthorizationException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a new exception with the specified cause and a detail
+ message of <tt>(cause==null ? null : cause.toString())</tt> (which
+ typically contains the class and detail message of <tt>cause</tt>).
+ @param  cause the cause (which is saved for later retrieval by the
+         {@link #getCause()} method).  (A <tt>null</tt> value is
+         permitted, and indicates that the cause is nonexistent or
+         unknown.)]]>
+      </doc>
+    </constructor>
+    <method name="getStackTrace" return="java.lang.StackTraceElement[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="printStackTrace"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="printStackTrace"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="s" type="java.io.PrintStream"/>
+    </method>
+    <method name="printStackTrace"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="s" type="java.io.PrintWriter"/>
+    </method>
+    <doc>
+    <![CDATA[An exception class for authorization-related issues.
+ 
+ This class <em>does not</em> provide the stack trace for security purposes.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.authorize.AuthorizationException -->
+  <!-- start class org.apache.hadoop.security.authorize.DefaultImpersonationProvider -->
+  <class name="DefaultImpersonationProvider" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.authorize.ImpersonationProvider"/>
+    <constructor name="DefaultImpersonationProvider"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTestProvider" return="org.apache.hadoop.security.authorize.DefaultImpersonationProvider"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="configurationPrefix" type="java.lang.String"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="authorize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="remoteAddress" type="java.net.InetAddress"/>
+      <exception name="AuthorizationException" type="org.apache.hadoop.security.authorize.AuthorizationException"/>
+    </method>
+    <method name="getProxySuperuserUserConfKey" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="userName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns configuration key for effective usergroups allowed for a superuser
+ 
+ @param userName name of the superuser
+ @return configuration key for superuser usergroups]]>
+      </doc>
+    </method>
+    <method name="getProxySuperuserGroupConfKey" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="userName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns configuration key for effective groups allowed for a superuser
+ 
+ @param userName name of the superuser
+ @return configuration key for superuser groups]]>
+      </doc>
+    </method>
+    <method name="getProxySuperuserIpConfKey" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="userName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Return configuration key for superuser ip addresses
+ 
+ @param userName name of the superuser
+ @return configuration key for superuser ip-addresses]]>
+      </doc>
+    </method>
+    <method name="getProxyGroups" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProxyHosts" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.security.authorize.DefaultImpersonationProvider -->
+  <!-- start interface org.apache.hadoop.security.authorize.ImpersonationProvider -->
+  <interface name="ImpersonationProvider"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <method name="init"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="configurationPrefix" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Specifies the configuration prefix for the proxy user properties and
+ initializes the provider.
+
+ @param configurationPrefix the configuration prefix for the proxy user
+ properties]]>
+      </doc>
+    </method>
+    <method name="authorize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="remoteAddress" type="java.lang.String"/>
+      <exception name="AuthorizationException" type="org.apache.hadoop.security.authorize.AuthorizationException"/>
+      <doc>
+      <![CDATA[Authorize the superuser which is doing doAs.
+ {@link #authorize(UserGroupInformation, InetAddress)} should
+             be preferred to avoid possibly re-resolving the ip address.
+ @param user ugi of the effective or proxy user which contains a real user.
+ @param remoteAddress the ip address of client.
+ @throws AuthorizationException]]>
+      </doc>
+    </method>
+    <method name="authorize"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="remoteAddress" type="java.net.InetAddress"/>
+      <exception name="AuthorizationException" type="org.apache.hadoop.security.authorize.AuthorizationException"/>
+      <doc>
+      <![CDATA[Authorize the superuser which is doing doAs.
+
+ @param user ugi of the effective or proxy user which contains a real user
+ @param remoteAddress the ip address of client
+ @throws AuthorizationException]]>
+      </doc>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.security.authorize.ImpersonationProvider -->
+</package>
+<package name="org.apache.hadoop.security.http">
+  <!-- start class org.apache.hadoop.security.http.RestCsrfPreventionFilter -->
+  <class name="RestCsrfPreventionFilter" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="javax.servlet.Filter"/>
+    <constructor name="RestCsrfPreventionFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filterConfig" type="javax.servlet.FilterConfig"/>
+      <exception name="ServletException" type="javax.servlet.ServletException"/>
+    </method>
+    <method name="isBrowser" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="userAgent" type="java.lang.String"/>
+      <doc>
+      <![CDATA[This method interrogates the User-Agent String and returns whether it
+ refers to a browser.  If it's not a browser, then the requirement for the
+ CSRF header will not be enforced; if it is a browser, the requirement will
+ be enforced.
+ <p>
+ A User-Agent String is considered to be a browser if it matches
+ any of the regex patterns from browser-useragent-regex; the default
+ behavior is to consider everything a browser that matches the following:
+ "^Mozilla.*,^Opera.*".  Subclasses can optionally override
+ this method to use different behavior.
+
+ @param userAgent The User-Agent String, or null if there isn't one
+ @return true if the User-Agent String refers to a browser, false if not]]>
+      </doc>
+    </method>
+    <method name="handleHttpInteraction"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="httpInteraction" type="org.apache.hadoop.security.http.RestCsrfPreventionFilter.HttpInteraction"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="ServletException" type="javax.servlet.ServletException"/>
+      <doc>
+      <![CDATA[Handles an {@link HttpInteraction} by applying the filtering logic.
+
+ @param httpInteraction caller's HTTP interaction
+ @throws IOException if there is an I/O error
+ @throws ServletException if the implementation relies on the servlet API
+     and a servlet API call has failed]]>
+      </doc>
+    </method>
+    <method name="doFilter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="javax.servlet.ServletRequest"/>
+      <param name="response" type="javax.servlet.ServletResponse"/>
+      <param name="chain" type="javax.servlet.FilterChain"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="ServletException" type="javax.servlet.ServletException"/>
+    </method>
+    <method name="destroy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFilterParams" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="confPrefix" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Constructs a mapping of configuration properties to be used for filter
+ initialization.  The mapping includes all properties that start with the
+ specified configuration prefix.  Property names in the mapping are trimmed
+ to remove the configuration prefix.
+
+ @param conf configuration to read
+ @param confPrefix configuration prefix
+ @return mapping of configuration properties to be used for filter
+     initialization]]>
+      </doc>
+    </method>
+    <field name="HEADER_USER_AGENT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="BROWSER_USER_AGENT_PARAM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CUSTOM_HEADER_PARAM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CUSTOM_METHODS_TO_IGNORE_PARAM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HEADER_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This filter provides protection against cross site request forgery (CSRF)
+ attacks for REST APIs. Enabling this filter on an endpoint results in the
+ requirement of all clients to send a particular (configurable) HTTP header
+ with every request. In the absence of this header the filter will reject the
+ attempt as a bad request.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.http.RestCsrfPreventionFilter -->
+  <!-- start class org.apache.hadoop.security.http.XFrameOptionsFilter -->
+  <class name="XFrameOptionsFilter" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="javax.servlet.Filter"/>
+    <constructor name="XFrameOptionsFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="destroy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="doFilter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="javax.servlet.ServletRequest"/>
+      <param name="res" type="javax.servlet.ServletResponse"/>
+      <param name="chain" type="javax.servlet.FilterChain"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="ServletException" type="javax.servlet.ServletException"/>
+    </method>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="config" type="javax.servlet.FilterConfig"/>
+      <exception name="ServletException" type="javax.servlet.ServletException"/>
+    </method>
+    <method name="getFilterParams" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="confPrefix" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Constructs a mapping of configuration properties to be used for filter
+ initialization.  The mapping includes all properties that start with the
+ specified configuration prefix.  Property names in the mapping are trimmed
+ to remove the configuration prefix.
+
+ @param conf configuration to read
+ @param confPrefix configuration prefix
+ @return mapping of configuration properties to be used for filter
+     initialization]]>
+      </doc>
+    </method>
+    <field name="X_FRAME_OPTIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CUSTOM_HEADER_PARAM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This filter protects webapps from clickjacking attacks that
+ are possible through use of Frames to embed the resources in another
+ application and intercept clicks to accomplish nefarious things.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.http.XFrameOptionsFilter -->
+</package>
+<package name="org.apache.hadoop.security.protocolPB">
+</package>
+<package name="org.apache.hadoop.security.ssl">
+</package>
+<package name="org.apache.hadoop.security.token">
+  <!-- start class org.apache.hadoop.security.token.SecretManager -->
+  <class name="SecretManager" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SecretManager"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createPassword" return="byte[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="identifier" type="T"/>
+      <doc>
+      <![CDATA[Create the password for the given identifier.
+ identifier may be modified inside this method.
+ @param identifier the identifier to use
+ @return the new password]]>
+      </doc>
+    </method>
+    <method name="retrievePassword" return="byte[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="identifier" type="T"/>
+      <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+      <doc>
+      <![CDATA[Retrieve the password for the given token identifier. Should check the date
+ or registry to make sure the token hasn't expired or been revoked. Returns 
+ the relevant password.
+ @param identifier the identifier to validate
+ @return the password to use
+ @throws InvalidToken the token was invalid]]>
+      </doc>
+    </method>
+    <method name="retriableRetrievePassword" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="identifier" type="T"/>
+      <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+      <exception name="StandbyException" type="org.apache.hadoop.ipc.StandbyException"/>
+      <exception name="RetriableException" type="org.apache.hadoop.ipc.RetriableException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The same functionality with {@link #retrievePassword}, except that this 
+ method can throw a {@link RetriableException} or a {@link StandbyException}
+ to indicate that client can retry/failover the same operation because of 
+ temporary issue on the server side.
+ 
+ @param identifier the identifier to validate
+ @return the password to use
+ @throws InvalidToken the token was invalid
+ @throws StandbyException the server is in standby state, the client can
+         try other servers
+ @throws RetriableException the token was invalid, and the server thinks 
+         this may be a temporary issue and suggests the client to retry
+ @throws IOException to allow future exceptions to be added without breaking
+         compatibility]]>
+      </doc>
+    </method>
+    <method name="createIdentifier" return="T"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an empty token identifier.
+ @return the newly created empty token identifier]]>
+      </doc>
+    </method>
+    <method name="checkAvailableForRead"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="StandbyException" type="org.apache.hadoop.ipc.StandbyException"/>
+      <doc>
+      <![CDATA[No-op if the secret manager is available for reading tokens, throw a
+ StandbyException otherwise.
+ 
+ @throws StandbyException if the secret manager is not available to read
+         tokens]]>
+      </doc>
+    </method>
+    <method name="generateSecret" return="javax.crypto.SecretKey"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Generate a new random secret key.
+ @return the new key]]>
+      </doc>
+    </method>
+    <method name="createPassword" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="identifier" type="byte[]"/>
+      <param name="key" type="javax.crypto.SecretKey"/>
+      <doc>
+      <![CDATA[Compute HMAC of the identifier using the secret key and return the 
+ output as password
+ @param identifier the bytes of the identifier
+ @param key the secret key
+ @return the bytes of the generated password]]>
+      </doc>
+    </method>
+    <method name="createSecretKey" return="javax.crypto.SecretKey"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="key" type="byte[]"/>
+      <doc>
+      <![CDATA[Convert the byte[] to a secret key
+ @param key the byte[] to create a secret key from
+ @return the secret key]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The server-side secret manager for each token type.
+ @param <T> The type of the token identifier]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.token.SecretManager -->
+  <!-- start class org.apache.hadoop.security.token.Token -->
+  <class name="Token" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="Token" type="T, org.apache.hadoop.security.token.SecretManager"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a token given a token identifier and a secret manager for the
+ type of the token identifier.
+ @param id the token identifier
+ @param mgr the secret manager]]>
+      </doc>
+    </constructor>
+    <constructor name="Token" type="byte[], byte[], org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a token from the components.
+ @param identifier the token identifier
+ @param password the token's password
+ @param kind the kind of token
+ @param service the service for this token]]>
+      </doc>
+    </constructor>
+    <constructor name="Token"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor.]]>
+      </doc>
+    </constructor>
+    <constructor name="Token" type="org.apache.hadoop.security.token.Token"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Clone a token.
+ @param other the token to clone]]>
+      </doc>
+    </constructor>
+    <method name="setID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="bytes" type="byte[]"/>
+    </method>
+    <method name="setPassword"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newPassword" type="byte[]"/>
+    </method>
+    <method name="copyToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getIdentifier" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the token identifier's byte representation.
+ @return the token identifier's byte representation]]>
+      </doc>
+    </method>
+    <method name="decodeIdentifier" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the token identifier object, or null if it could not be constructed
+ (because the class could not be loaded, for example).
+ @return the token identifier, or null if there was no class found for it
+ @throws IOException failure to unmarshall the data
+ @throws RuntimeException if the token class could not be instantiated.]]>
+      </doc>
+    </method>
+    <method name="getPassword" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the token password/secret.
+ @return the token password/secret]]>
+      </doc>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the token kind.
+ @return the kind of the token]]>
+      </doc>
+    </method>
+    <method name="getService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the service on which the token is supposed to be used.
+ @return the service name]]>
+      </doc>
+    </method>
+    <method name="setService"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newService" type="org.apache.hadoop.io.Text"/>
+      <doc>
+      <![CDATA[Set the service on which the token is supposed to be used.
+ @param newService the service name]]>
+      </doc>
+    </method>
+    <method name="isPrivate" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether this is a private token.
+ @return false always for non-private tokens]]>
+      </doc>
+    </method>
+    <method name="isPrivateCloneOf" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="thePublicService" type="org.apache.hadoop.io.Text"/>
+      <doc>
+      <![CDATA[Whether this is a private clone of a public token.
+ @param thePublicService the public service name
+ @return false always for non-private tokens]]>
+      </doc>
+    </method>
+    <method name="privateClone" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newService" type="org.apache.hadoop.io.Text"/>
+      <doc>
+      <![CDATA[Create a private clone of a public token.
+ @param newService the new service name
+ @return a private token]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="encodeToUrlString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Encode this token as a url safe string.
+ @return the encoded string
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="decodeFromUrlString"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newValue" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Decode the given url safe string into this token.
+ @param newValue the encoded string
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="right" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="buildCacheKey" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isManaged" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Is this token managed so that it can be renewed or cancelled?
+ @return true, if it can be renewed and cancelled.]]>
+      </doc>
+    </method>
+    <method name="renew" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Renew this delegation token.
+ @return the new expiration time
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="cancel"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Cancel this delegation token.
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[The client-side form of the token.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.token.Token -->
+  <!-- start class org.apache.hadoop.security.token.Token.TrivialRenewer -->
+  <class name="Token.TrivialRenewer" extends="org.apache.hadoop.security.token.TokenRenewer"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TrivialRenewer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="handleKind" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="kind" type="org.apache.hadoop.io.Text"/>
+    </method>
+    <method name="isManaged" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+    </method>
+    <method name="renew" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="cancel"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <doc>
+    <![CDATA[A trivial renewer for token kinds that aren't managed. Sub-classes need
+ to implement getKind for their token kind.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.token.Token.TrivialRenewer -->
+  <!-- start class org.apache.hadoop.security.token.TokenIdentifier -->
+  <class name="TokenIdentifier" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="TokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the token kind
+ @return the kind of the token]]>
+      </doc>
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the Ugi with the username encoded in the token identifier
+ 
+ @return the username. null is returned if username in the identifier is
+         empty or null.]]>
+      </doc>
+    </method>
+    <method name="getBytes" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the bytes for the token identifier
+ @return the bytes of the identifier]]>
+      </doc>
+    </method>
+    <method name="getTrackingId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns a tracking identifier that can be used to associate usages of a
+ token across multiple client sessions.
+
+ Currently, this function just returns an MD5 of {{@link #getBytes()}.
+
+ @return tracking identifier]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[An identifier that identifies a token, may contain public information 
+ about a token, including its kind (or type).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.token.TokenIdentifier -->
+  <!-- start class org.apache.hadoop.security.token.TokenInfo -->
+  <class name="TokenInfo"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.annotation.Annotation"/>
+    <doc>
+    <![CDATA[Indicates Token related information to be used]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.token.TokenInfo -->
+  <!-- start class org.apache.hadoop.security.token.TokenRenewer -->
+  <class name="TokenRenewer" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TokenRenewer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="handleKind" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="kind" type="org.apache.hadoop.io.Text"/>
+      <doc>
+      <![CDATA[Does this renewer handle this kind of token?
+ @param kind the kind of the token
+ @return true if this renewer can renew it]]>
+      </doc>
+    </method>
+    <method name="isManaged" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Is the given token managed? Only managed tokens may be renewed or
+ cancelled.
+ @param token the token being checked
+ @return true if the token may be renewed or cancelled
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="renew" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Renew the given token.
+ @return the new expiration time
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="cancel"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Cancel the given token
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the interface for plugins that handle tokens.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.token.TokenRenewer -->
+  <!-- start interface org.apache.hadoop.security.token.TokenSelector -->
+  <interface name="TokenSelector"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+    <doc>
+    <![CDATA[Select token of type T from tokens for use with named service
+ 
+ @param <T>
+          T extends TokenIdentifier]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.security.token.TokenSelector -->
+</package>
+<package name="org.apache.hadoop.security.token.delegation.web">
+  <!-- start class org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL -->
+  <class name="DelegationTokenAuthenticatedURL" extends="org.apache.hadoop.security.authentication.client.AuthenticatedURL"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="DelegationTokenAuthenticatedURL"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates an <code>DelegationTokenAuthenticatedURL</code>.
+ <p>
+ An instance of the default {@link DelegationTokenAuthenticator} will be
+ used.]]>
+      </doc>
+    </constructor>
+    <constructor name="DelegationTokenAuthenticatedURL" type="org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates an <code>DelegationTokenAuthenticatedURL</code>.
+
+ @param authenticator the {@link DelegationTokenAuthenticator} instance to
+ use, if <code>null</code> the default one will be used.]]>
+      </doc>
+    </constructor>
+    <constructor name="DelegationTokenAuthenticatedURL" type="org.apache.hadoop.security.authentication.client.ConnectionConfigurator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates an <code>DelegationTokenAuthenticatedURL</code> using the default
+ {@link DelegationTokenAuthenticator} class.
+
+ @param connConfigurator a connection configurator.]]>
+      </doc>
+    </constructor>
+    <constructor name="DelegationTokenAuthenticatedURL" type="org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator, org.apache.hadoop.security.authentication.client.ConnectionConfigurator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates an <code>DelegationTokenAuthenticatedURL</code>.
+
+ @param authenticator the {@link DelegationTokenAuthenticator} instance to
+ use, if <code>null</code> the default one will be used.
+ @param connConfigurator a connection configurator.]]>
+      </doc>
+    </constructor>
+    <method name="setDefaultDelegationTokenAuthenticator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="authenticator" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Sets the default {@link DelegationTokenAuthenticator} class to use when an
+ {@link DelegationTokenAuthenticatedURL} instance is created without
+ specifying one.
+
+ The default class is {@link KerberosDelegationTokenAuthenticator}
+
+ @param authenticator the authenticator class to use as default.]]>
+      </doc>
+    </method>
+    <method name="getDefaultDelegationTokenAuthenticator" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the default {@link DelegationTokenAuthenticator} class to use when
+ an {@link DelegationTokenAuthenticatedURL} instance is created without
+ specifying one.
+ <p>
+ The default class is {@link KerberosDelegationTokenAuthenticator}
+
+ @return the delegation token authenticator class to use as default.]]>
+      </doc>
+    </method>
+    <method name="setUseQueryStringForDelegationToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="useQueryString" type="boolean"/>
+      <doc>
+      <![CDATA[Sets if delegation token should be transmitted in the URL query string.
+ By default it is transmitted using the
+ {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.
+ <p>
+ This method is provided to enable WebHDFS backwards compatibility.
+
+ @param useQueryString  <code>TRUE</code> if the token is transmitted in the
+ URL query string, <code>FALSE</code> if the delegation token is transmitted
+ using the {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP
+ header.]]>
+      </doc>
+    </method>
+    <method name="useQueryStringForDelegationToken" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns if delegation token is transmitted as a HTTP header.
+
+ @return <code>TRUE</code> if the token is transmitted in the URL query
+ string, <code>FALSE</code> if the delegation token is transmitted using the
+ {@link DelegationTokenAuthenticator#DELEGATION_TOKEN_HEADER} HTTP header.]]>
+      </doc>
+    </method>
+    <method name="openConnection" return="java.net.HttpURLConnection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.authentication.client.AuthenticatedURL.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AuthenticationException" type="org.apache.hadoop.security.authentication.client.AuthenticationException"/>
+      <doc>
+      <![CDATA[Returns an authenticated {@link HttpURLConnection}, it uses a Delegation
+ Token only if the given auth token is an instance of {@link Token} and
+ it contains a Delegation Token, otherwise use the configured
+ {@link DelegationTokenAuthenticator} to authenticate the connection.
+
+ @param url the URL to connect to. Only HTTP/S URLs are supported.
+ @param token the authentication token being used for the user.
+ @return an authenticated {@link HttpURLConnection}.
+ @throws IOException if an IO error occurred.
+ @throws AuthenticationException if an authentication exception occurred.]]>
+      </doc>
+    </method>
+    <method name="openConnection" return="java.net.HttpURLConnection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AuthenticationException" type="org.apache.hadoop.security.authentication.client.AuthenticationException"/>
+      <doc>
+      <![CDATA[Returns an authenticated {@link HttpURLConnection}. If the Delegation
+ Token is present, it will be used taking precedence over the configured
+ <code>Authenticator</code>.
+
+ @param url the URL to connect to. Only HTTP/S URLs are supported.
+ @param token the authentication token being used for the user.
+ @return an authenticated {@link HttpURLConnection}.
+ @throws IOException if an IO error occurred.
+ @throws AuthenticationException if an authentication exception occurred.]]>
+      </doc>
+    </method>
+    <method name="openConnection" return="java.net.HttpURLConnection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token"/>
+      <param name="doAs" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AuthenticationException" type="org.apache.hadoop.security.authentication.client.AuthenticationException"/>
+      <doc>
+      <![CDATA[Returns an authenticated {@link HttpURLConnection}. If the Delegation
+ Token is present, it will be used taking precedence over the configured
+ <code>Authenticator</code>. If the <code>doAs</code> parameter is not NULL,
+ the request will be done on behalf of the specified <code>doAs</code> user.
+
+ @param url the URL to connect to. Only HTTP/S URLs are supported.
+ @param token the authentication token being used for the user.
+ @param doAs user to do the the request on behalf of, if NULL the request is
+ as self.
+ @return an authenticated {@link HttpURLConnection}.
+ @throws IOException if an IO error occurred.
+ @throws AuthenticationException if an authentication exception occurred.]]>
+      </doc>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token"/>
+      <param name="renewer" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AuthenticationException" type="org.apache.hadoop.security.authentication.client.AuthenticationException"/>
+      <doc>
+      <![CDATA[Requests a delegation token using the configured <code>Authenticator</code>
+ for authentication.
+
+ @param url the URL to get the delegation token from. Only HTTP/S URLs are
+ supported.
+ @param token the authentication token being used for the user where the
+ Delegation token will be stored.
+ @param renewer the renewer user.
+ @return a delegation token.
+ @throws IOException if an IO error occurred.
+ @throws AuthenticationException if an authentication exception occurred.]]>
+      </doc>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token"/>
+      <param name="renewer" type="java.lang.String"/>
+      <param name="doAsUser" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AuthenticationException" type="org.apache.hadoop.security.authentication.client.AuthenticationException"/>
+      <doc>
+      <![CDATA[Requests a delegation token using the configured <code>Authenticator</code>
+ for authentication.
+
+ @param url the URL to get the delegation token from. Only HTTP/S URLs are
+ supported.
+ @param token the authentication token being used for the user where the
+ Delegation token will be stored.
+ @param renewer the renewer user.
+ @param doAsUser the user to do as, which will be the token owner.
+ @return a delegation token.
+ @throws IOException if an IO error occurred.
+ @throws AuthenticationException if an authentication exception occurred.]]>
+      </doc>
+    </method>
+    <method name="renewDelegationToken" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AuthenticationException" type="org.apache.hadoop.security.authentication.client.AuthenticationException"/>
+      <doc>
+      <![CDATA[Renews a delegation token from the server end-point using the
+ configured <code>Authenticator</code> for authentication.
+
+ @param url the URL to renew the delegation token from. Only HTTP/S URLs are
+ supported.
+ @param token the authentication token with the Delegation Token to renew.
+ @throws IOException if an IO error occurred.
+ @throws AuthenticationException if an authentication exception occurred.]]>
+      </doc>
+    </method>
+    <method name="renewDelegationToken" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token"/>
+      <param name="doAsUser" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AuthenticationException" type="org.apache.hadoop.security.authentication.client.AuthenticationException"/>
+      <doc>
+      <![CDATA[Renews a delegation token from the server end-point using the
+ configured <code>Authenticator</code> for authentication.
+
+ @param url the URL to renew the delegation token from. Only HTTP/S URLs are
+ supported.
+ @param token the authentication token with the Delegation Token to renew.
+ @param doAsUser the user to do as, which will be the token owner.
+ @throws IOException if an IO error occurred.
+ @throws AuthenticationException if an authentication exception occurred.]]>
+      </doc>
+    </method>
+    <method name="cancelDelegationToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Cancels a delegation token from the server end-point. It does not require
+ being authenticated by the configured <code>Authenticator</code>.
+
+ @param url the URL to cancel the delegation token from. Only HTTP/S URLs
+ are supported.
+ @param token the authentication token with the Delegation Token to cancel.
+ @throws IOException if an IO error occurred.]]>
+      </doc>
+    </method>
+    <method name="cancelDelegationToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token"/>
+      <param name="doAsUser" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Cancels a delegation token from the server end-point. It does not require
+ being authenticated by the configured <code>Authenticator</code>.
+
+ @param url the URL to cancel the delegation token from. Only HTTP/S URLs
+ are supported.
+ @param token the authentication token with the Delegation Token to cancel.
+ @param doAsUser the user to do as, which will be the token owner.
+ @throws IOException if an IO error occurred.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The <code>DelegationTokenAuthenticatedURL</code> is a
+ {@link AuthenticatedURL} sub-class with built-in Hadoop Delegation Token
+ functionality.
+ <p>
+ The authentication mechanisms supported by default are Hadoop Simple
+ authentication (also known as pseudo authentication) and Kerberos SPNEGO
+ authentication.
+ <p>
+ Additional authentication mechanisms can be supported via {@link
+ DelegationTokenAuthenticator} implementations.
+ <p>
+ The default {@link DelegationTokenAuthenticator} is the {@link
+ KerberosDelegationTokenAuthenticator} class which supports
+ automatic fallback from Kerberos SPNEGO to Hadoop Simple authentication via
+ the {@link PseudoDelegationTokenAuthenticator} class.
+ <p>
+ <code>AuthenticatedURL</code> instances are not thread-safe.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL -->
+  <!-- start class org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token -->
+  <class name="DelegationTokenAuthenticatedURL.Token" extends="org.apache.hadoop.security.authentication.client.AuthenticatedURL.Token"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Token"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setDelegationToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="delegationToken" type="org.apache.hadoop.security.token.Token"/>
+    </method>
+    <doc>
+    <![CDATA[Client side authentication token that handles Delegation Tokens.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL.Token -->
+  <!-- start class org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator -->
+  <class name="DelegationTokenAuthenticator" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.authentication.client.Authenticator"/>
+    <constructor name="DelegationTokenAuthenticator" type="org.apache.hadoop.security.authentication.client.Authenticator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConnectionConfigurator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="configurator" type="org.apache.hadoop.security.authentication.client.ConnectionConfigurator"/>
+    </method>
+    <method name="authenticate"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.authentication.client.AuthenticatedURL.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AuthenticationException" type="org.apache.hadoop.security.authentication.client.AuthenticationException"/>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.authentication.client.AuthenticatedURL.Token"/>
+      <param name="renewer" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AuthenticationException" type="org.apache.hadoop.security.authentication.client.AuthenticationException"/>
+      <doc>
+      <![CDATA[Requests a delegation token using the configured <code>Authenticator</code>
+ for authentication.
+
+ @param url the URL to get the delegation token from. Only HTTP/S URLs are
+ supported.
+ @param token the authentication token being used for the user where the
+ Delegation token will be stored.
+ @param renewer the renewer user.
+ @throws IOException if an IO error occurred.
+ @throws AuthenticationException if an authentication exception occurred.]]>
+      </doc>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.authentication.client.AuthenticatedURL.Token"/>
+      <param name="renewer" type="java.lang.String"/>
+      <param name="doAsUser" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AuthenticationException" type="org.apache.hadoop.security.authentication.client.AuthenticationException"/>
+      <doc>
+      <![CDATA[Requests a delegation token using the configured <code>Authenticator</code>
+ for authentication.
+
+ @param url the URL to get the delegation token from. Only HTTP/S URLs are
+ supported.
+ @param token the authentication token being used for the user where the
+ Delegation token will be stored.
+ @param renewer the renewer user.
+ @param doAsUser the user to do as, which will be the token owner.
+ @throws IOException if an IO error occurred.
+ @throws AuthenticationException if an authentication exception occurred.]]>
+      </doc>
+    </method>
+    <method name="renewDelegationToken" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.authentication.client.AuthenticatedURL.Token"/>
+      <param name="dToken" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AuthenticationException" type="org.apache.hadoop.security.authentication.client.AuthenticationException"/>
+      <doc>
+      <![CDATA[Renews a delegation token from the server end-point using the
+ configured <code>Authenticator</code> for authentication.
+
+ @param url the URL to renew the delegation token from. Only HTTP/S URLs are
+ supported.
+ @param token the authentication token used to connect to the server.
+ @param dToken the delegation token to renew.
+ @throws IOException if an IO error occurred.
+ @throws AuthenticationException if an authentication exception occurred.]]>
+      </doc>
+    </method>
+    <method name="renewDelegationToken" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.authentication.client.AuthenticatedURL.Token"/>
+      <param name="dToken" type="org.apache.hadoop.security.token.Token"/>
+      <param name="doAsUser" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AuthenticationException" type="org.apache.hadoop.security.authentication.client.AuthenticationException"/>
+      <doc>
+      <![CDATA[Renews a delegation token from the server end-point using the
+ configured <code>Authenticator</code> for authentication.
+
+ @param url the URL to renew the delegation token from. Only HTTP/S URLs are
+ supported.
+ @param token the authentication token used to connect to the server.
+ @param dToken the delegation token to renew.
+ @param doAsUser the user to do as, which will be the token owner.
+ @throws IOException if an IO error occurred.
+ @throws AuthenticationException if an authentication exception occurred.]]>
+      </doc>
+    </method>
+    <method name="cancelDelegationToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.authentication.client.AuthenticatedURL.Token"/>
+      <param name="dToken" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Cancels a delegation token from the server end-point. It does not require
+ being authenticated by the configured <code>Authenticator</code>.
+
+ @param url the URL to cancel the delegation token from. Only HTTP/S URLs
+ are supported.
+ @param token the authentication token used to connect to the server.
+ @param dToken the delegation token to cancel.
+ @throws IOException if an IO error occurred.]]>
+      </doc>
+    </method>
+    <method name="cancelDelegationToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.net.URL"/>
+      <param name="token" type="org.apache.hadoop.security.authentication.client.AuthenticatedURL.Token"/>
+      <param name="dToken" type="org.apache.hadoop.security.token.Token"/>
+      <param name="doAsUser" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Cancels a delegation token from the server end-point. It does not require
+ being authenticated by the configured <code>Authenticator</code>.
+
+ @param url the URL to cancel the delegation token from. Only HTTP/S URLs
+ are supported.
+ @param token the authentication token used to connect to the server.
+ @param dToken the delegation token to cancel.
+ @param doAsUser the user to do as, which will be the token owner.
+ @throws IOException if an IO error occurred.]]>
+      </doc>
+    </method>
+    <field name="OP_PARAM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DELEGATION_TOKEN_HEADER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DELEGATION_PARAM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TOKEN_PARAM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RENEWER_PARAM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SERVICE_PARAM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DELEGATION_TOKEN_JSON" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DELEGATION_TOKEN_URL_STRING_JSON" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RENEW_DELEGATION_TOKEN_JSON" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[{@link Authenticator} wrapper that enhances an {@link Authenticator} with
+ Delegation Token support.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator -->
+  <!-- start class org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator -->
+  <class name="KerberosDelegationTokenAuthenticator" extends="org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="KerberosDelegationTokenAuthenticator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[The <code>KerberosDelegationTokenAuthenticator</code> provides support for
+ Kerberos SPNEGO authentication mechanism and support for Hadoop Delegation
+ Token operations.
+ <p>
+ It falls back to the {@link PseudoDelegationTokenAuthenticator} if the HTTP
+ endpoint does not trigger a SPNEGO authentication.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator -->
+  <!-- start class org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator -->
+  <class name="PseudoDelegationTokenAuthenticator" extends="org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="PseudoDelegationTokenAuthenticator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[The <code>PseudoDelegationTokenAuthenticator</code> provides support for
+ Hadoop's pseudo authentication mechanism that accepts
+ the user name specified as a query string parameter and support for Hadoop
+ Delegation Token operations.
+ <p>
+ This mimics the model of Hadoop Simple authentication trusting the
+ {@link UserGroupInformation#getCurrentUser()} value.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator -->
+</package>
+<package name="org.apache.hadoop.service">
+  <!-- start class org.apache.hadoop.service.AbstractService -->
+  <class name="AbstractService" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.service.Service"/>
+    <constructor name="AbstractService" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct the service.
+ @param name service name]]>
+      </doc>
+    </constructor>
+    <method name="getServiceState" return="org.apache.hadoop.service.Service.STATE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFailureCause" return="java.lang.Throwable"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFailureState" return="org.apache.hadoop.service.Service.STATE"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setConfig"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Set the configuration for this service.
+ This method is called during {@link #init(Configuration)}
+ and should only be needed if for some reason a service implementation
+ needs to override that initial setting -for example replacing
+ it with a new subclass of {@link Configuration}
+ @param conf new configuration.]]>
+      </doc>
+    </method>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[{@inheritDoc}
+ This invokes {@link #serviceInit}
+ @param conf the configuration of the service. This must not be null
+ @throws ServiceStateException if the configuration was null,
+ the state change not permitted, or something else went wrong]]>
+      </doc>
+    </method>
+    <method name="start"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@inheritDoc}
+ @throws ServiceStateException if the current service state does not permit
+ this action]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Relay to {@link #stop()}
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="noteFailure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+      <param name="exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[Failure handling: record the exception
+ that triggered it -if there was not one already.
+ Services are free to call this themselves.
+ @param exception the exception]]>
+      </doc>
+    </method>
+    <method name="waitForServiceToStop" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="timeout" type="long"/>
+    </method>
+    <method name="serviceInit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[All initialization code needed by a service.
+
+ This method will only ever be called once during the lifecycle of
+ a specific service instance.
+
+ Implementations do not need to be synchronized as the logic
+ in {@link #init(Configuration)} prevents re-entrancy.
+
+ The base implementation checks to see if the subclass has created
+ a new configuration instance, and if so, updates the base class value
+ @param conf configuration
+ @throws Exception on a failure -these will be caught,
+ possibly wrapped, and will trigger a service stop]]>
+      </doc>
+    </method>
+    <method name="serviceStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[Actions called during the INITED to STARTED transition.
+
+ This method will only ever be called once during the lifecycle of
+ a specific service instance.
+
+ Implementations do not need to be synchronized as the logic
+ in {@link #start()} prevents re-entrancy.
+
+ @throws Exception if needed -these will be caught,
+ wrapped, and trigger a service stop]]>
+      </doc>
+    </method>
+    <method name="serviceStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[Actions called during the transition to the STOPPED state.
+
+ This method will only ever be called once during the lifecycle of
+ a specific service instance.
+
+ Implementations do not need to be synchronized as the logic
+ in {@link #stop()} prevents re-entrancy.
+
+ Implementations MUST write this to be robust against failures, including
+ checks for null references -and for the first failure to not stop other
+ attempts to shut down parts of the service.
+
+ @throws Exception if needed -these will be caught and logged.]]>
+      </doc>
+    </method>
+    <method name="registerServiceListener"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="l" type="org.apache.hadoop.service.ServiceStateChangeListener"/>
+    </method>
+    <method name="unregisterServiceListener"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="l" type="org.apache.hadoop.service.ServiceStateChangeListener"/>
+    </method>
+    <method name="registerGlobalListener"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="l" type="org.apache.hadoop.service.ServiceStateChangeListener"/>
+      <doc>
+      <![CDATA[Register a global listener, which receives notifications
+ from the state change events of all services in the JVM
+ @param l listener]]>
+      </doc>
+    </method>
+    <method name="unregisterGlobalListener" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="l" type="org.apache.hadoop.service.ServiceStateChangeListener"/>
+      <doc>
+      <![CDATA[unregister a global listener.
+ @param l listener to unregister
+ @return true if the listener was found (and then deleted)]]>
+      </doc>
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getConfig" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLifecycleHistory" return="java.util.List"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isInState" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="expected" type="org.apache.hadoop.service.Service.STATE"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="putBlocker"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="details" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Put a blocker to the blocker map -replacing any
+ with the same name.
+ @param name blocker name
+ @param details any specifics on the block. This must be non-null.]]>
+      </doc>
+    </method>
+    <method name="removeBlocker"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Remove a blocker from the blocker map -
+ this is a no-op if the blocker is not present
+ @param name the name of the blocker]]>
+      </doc>
+    </method>
+    <method name="getBlockers" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[This is the base implementation class for services.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.service.AbstractService -->
+  <!-- start class org.apache.hadoop.service.CompositeService -->
+  <class name="CompositeService" extends="org.apache.hadoop.service.AbstractService"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CompositeService" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getServices" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a cloned list of services
+ @return a list of child services at the time of invocation -
+ added services will not be picked up.]]>
+      </doc>
+    </method>
+    <method name="addService"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.service.Service"/>
+      <doc>
+      <![CDATA[Add the passed {@link Service} to the list of services managed by this
+ {@link CompositeService}
+ @param service the {@link Service} to be added]]>
+      </doc>
+    </method>
+    <method name="addIfService" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="object" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[If the passed object is an instance of {@link Service},
+ add it to the list of services managed by this {@link CompositeService}
+ @param object
+ @return true if a service is added, false otherwise.]]>
+      </doc>
+    </method>
+    <method name="removeService" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.service.Service"/>
+    </method>
+    <method name="serviceInit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="serviceStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="serviceStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <field name="STOP_ONLY_STARTED_SERVICES" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Policy on shutdown: attempt to close everything (purest) or
+ only try to close started services (which assumes
+ that the service implementations may not handle the stop() operation
+ except when started).
+ Irrespective of this policy, if a child service fails during
+ its init() or start() operations, it will have stop() called on it.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Composition of services.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.service.CompositeService -->
+  <!-- start class org.apache.hadoop.service.LifecycleEvent -->
+  <class name="LifecycleEvent" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Serializable"/>
+    <constructor name="LifecycleEvent"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <field name="time" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Local time in milliseconds when the event occurred]]>
+      </doc>
+    </field>
+    <field name="state" type="org.apache.hadoop.service.Service.STATE"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[new state]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A serializable lifecycle event: the time a state
+ transition occurred, and what state was entered.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.service.LifecycleEvent -->
+  <!-- start class org.apache.hadoop.service.LoggingStateChangeListener -->
+  <class name="LoggingStateChangeListener" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.service.ServiceStateChangeListener"/>
+    <constructor name="LoggingStateChangeListener" type="org.slf4j.Logger"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Log events to the given log
+ @param log destination for events]]>
+      </doc>
+    </constructor>
+    <constructor name="LoggingStateChangeListener"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Log events to the static log for this class]]>
+      </doc>
+    </constructor>
+    <method name="stateChanged"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.service.Service"/>
+      <doc>
+      <![CDATA[Callback for a state change event: log it
+ @param service the service that has changed.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is a state change listener that logs events at INFO level]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.service.LoggingStateChangeListener -->
+  <!-- start interface org.apache.hadoop.service.Service -->
+  <interface name="Service"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Closeable"/>
+    <method name="init"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Initialize the service.
+
+ The transition MUST be from {@link STATE#NOTINITED} to {@link STATE#INITED}
+ unless the operation failed and an exception was raised, in which case
+ {@link #stop()} MUST be invoked and the service enter the state
+ {@link STATE#STOPPED}.
+ @param config the configuration of the service
+ @throws RuntimeException on any failure during the operation]]>
+      </doc>
+    </method>
+    <method name="start"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Start the service.
+
+ The transition MUST be from {@link STATE#INITED} to {@link STATE#STARTED}
+ unless the operation failed and an exception was raised, in which case
+ {@link #stop()} MUST be invoked and the service enter the state
+ {@link STATE#STOPPED}.
+ @throws RuntimeException on any failure during the operation]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Stop the service. This MUST be a no-op if the service is already
+ in the {@link STATE#STOPPED} state. It SHOULD be a best-effort attempt
+ to stop all parts of the service.
+
+ The implementation must be designed to complete regardless of the service
+ state, including the initialized/uninitialized state of all its internal
+ fields.
+ @throws RuntimeException on any failure during the stop operation]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[A version of stop() that is designed to be usable in Java7 closure
+ clauses.
+ Implementation classes MUST relay this directly to {@link #stop()}
+ @throws IOException never
+ @throws RuntimeException on any failure during the stop operation]]>
+      </doc>
+    </method>
+    <method name="registerServiceListener"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="listener" type="org.apache.hadoop.service.ServiceStateChangeListener"/>
+      <doc>
+      <![CDATA[Register a listener to the service state change events.
+ If the supplied listener is already listening to this service,
+ this method is a no-op.
+ @param listener a new listener]]>
+      </doc>
+    </method>
+    <method name="unregisterServiceListener"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="listener" type="org.apache.hadoop.service.ServiceStateChangeListener"/>
+      <doc>
+      <![CDATA[Unregister a previously registered listener of the service state
+ change events. No-op if the listener is already unregistered.
+ @param listener the listener to unregister.]]>
+      </doc>
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the name of this service.
+ @return the service name]]>
+      </doc>
+    </method>
+    <method name="getConfig" return="org.apache.hadoop.conf.Configuration"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the configuration of this service.
+ This is normally not a clone and may be manipulated, though there are no
+ guarantees as to what the consequences of such actions may be
+ @return the current configuration, unless a specific implementation chooses
+ otherwise.]]>
+      </doc>
+    </method>
+    <method name="getServiceState" return="org.apache.hadoop.service.Service.STATE"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current service state
+ @return the state of the service]]>
+      </doc>
+    </method>
+    <method name="getStartTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the service start time
+ @return the start time of the service. This will be zero if the service
+ has not yet been started.]]>
+      </doc>
+    </method>
+    <method name="isInState" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="state" type="org.apache.hadoop.service.Service.STATE"/>
+      <doc>
+      <![CDATA[Query to see if the service is in a specific state.
+ In a multi-threaded system, the state may not hold for very long.
+ @param state the expected state
+ @return true if, at the time of invocation, the service was in that state.]]>
+      </doc>
+    </method>
+    <method name="getFailureCause" return="java.lang.Throwable"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the first exception raised during the service failure. If null,
+ no exception was logged
+ @return the failure logged during a transition to the stopped state]]>
+      </doc>
+    </method>
+    <method name="getFailureState" return="org.apache.hadoop.service.Service.STATE"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the state in which the failure in {@link #getFailureCause()} occurred.
+ @return the state or null if there was no failure]]>
+      </doc>
+    </method>
+    <method name="waitForServiceToStop" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timeout" type="long"/>
+      <doc>
+      <![CDATA[Block waiting for the service to stop; uses the termination notification
+ object to do so.
+
+ This method will only return after all the service stop actions
+ have been executed (to success or failure), or the timeout elapsed
+ This method can be called before the service is inited or started; this is
+ to eliminate any race condition with the service stopping before
+ this event occurs.
+ @param timeout timeout in milliseconds. A value of zero means "forever"
+ @return true iff the service stopped in the time period]]>
+      </doc>
+    </method>
+    <method name="getLifecycleHistory" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a snapshot of the lifecycle history; it is a static list
+ @return a possibly empty but never null list of lifecycle events.]]>
+      </doc>
+    </method>
+    <method name="getBlockers" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the blockers on a service -remote dependencies
+ that are stopping the service from being <i>live</i>.
+ @return a (snapshotted) map of blocker name-&gt;description values]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Service LifeCycle.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.service.Service -->
+  <!-- start class org.apache.hadoop.service.ServiceOperations -->
+  <class name="ServiceOperations" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="stop"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.service.Service"/>
+      <doc>
+      <![CDATA[Stop a service.
+ <p>Do nothing if the service is null or not
+ in a state in which it can be/needs to be stopped.
+ <p>
+ The service state is checked <i>before</i> the operation begins.
+ This process is <i>not</i> thread safe.
+ @param service a service or null]]>
+      </doc>
+    </method>
+    <method name="stopQuietly" return="java.lang.Exception"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.service.Service"/>
+      <doc>
+      <![CDATA[Stop a service; if it is null do nothing. Exceptions are caught and
+ logged at warn level. (but not Throwables). This operation is intended to
+ be used in cleanup operations
+
+ @param service a service; may be null
+ @return any exception that was caught; null if none was.]]>
+      </doc>
+    </method>
+    <method name="stopQuietly" return="java.lang.Exception"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="log" type="org.apache.commons.logging.Log"/>
+      <param name="service" type="org.apache.hadoop.service.Service"/>
+      <doc>
+      <![CDATA[Stop a service; if it is null do nothing. Exceptions are caught and
+ logged at warn level. (but not Throwables). This operation is intended to
+ be used in cleanup operations
+
+ @param log the log to warn at
+ @param service a service; may be null
+ @return any exception that was caught; null if none was.
+ @see ServiceOperations#stopQuietly(Service)]]>
+      </doc>
+    </method>
+    <method name="stopQuietly" return="java.lang.Exception"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="log" type="org.slf4j.Logger"/>
+      <param name="service" type="org.apache.hadoop.service.Service"/>
+      <doc>
+      <![CDATA[Stop a service; if it is null do nothing. Exceptions are caught and
+ logged at warn level. (but not Throwables). This operation is intended to
+ be used in cleanup operations
+
+ @param log the log to warn at
+ @param service a service; may be null
+ @return any exception that was caught; null if none was.
+ @see ServiceOperations#stopQuietly(Service)]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class contains a set of methods to work with services, especially
+ to walk them through their lifecycle.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.service.ServiceOperations -->
+  <!-- start interface org.apache.hadoop.service.ServiceStateChangeListener -->
+  <interface name="ServiceStateChangeListener"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="stateChanged"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.service.Service"/>
+      <doc>
+      <![CDATA[Callback to notify of a state change. The service will already
+ have changed state before this callback is invoked.
+
+ This operation is invoked on the thread that initiated the state change,
+ while the service itself is in a synchronized section.
+ <ol>
+   <li>Any long-lived operation here will prevent the service state
+   change from completing in a timely manner.</li>
+   <li>If another thread is somehow invoked from the listener, and
+   that thread invokes the methods of the service (including
+   subclass-specific methods), there is a risk of a deadlock.</li>
+ </ol>
+
+
+ @param service the service that has changed.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface to notify state changes of a service.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.service.ServiceStateChangeListener -->
+  <!-- start class org.apache.hadoop.service.ServiceStateException -->
+  <class name="ServiceStateException" extends="java.lang.RuntimeException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.util.ExitCodeProvider"/>
+    <constructor name="ServiceStateException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Instantiate
+ @param message error message]]>
+      </doc>
+    </constructor>
+    <constructor name="ServiceStateException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Instantiate with a message and cause; if the cause has an exit code
+ then it is used, otherwise the generic
+ {@link LauncherExitCodes#EXIT_SERVICE_LIFECYCLE_EXCEPTION} exit code
+ is used.
+ @param message exception message
+ @param cause optional inner cause]]>
+      </doc>
+    </constructor>
+    <constructor name="ServiceStateException" type="int, java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Instantiate, using the specified exit code as the exit code
+ of the exception, irrespective of any exit code supplied by any inner
+ cause.
+
+ @param exitCode exit code to declare
+ @param message exception message
+ @param cause inner cause]]>
+      </doc>
+    </constructor>
+    <constructor name="ServiceStateException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getExitCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="convert" return="java.lang.RuntimeException"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fault" type="java.lang.Throwable"/>
+      <doc>
+      <![CDATA[Convert any exception into a {@link RuntimeException}.
+ All other exception types are wrapped in a new instance of
+ {@code ServiceStateException}.
+ @param fault exception or throwable
+ @return a {@link RuntimeException} to rethrow]]>
+      </doc>
+    </method>
+    <method name="convert" return="java.lang.RuntimeException"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="text" type="java.lang.String"/>
+      <param name="fault" type="java.lang.Throwable"/>
+      <doc>
+      <![CDATA[Convert any exception into a {@link RuntimeException}.
+ If the caught exception is already of that type, it is typecast to a
+ {@link RuntimeException} and returned.
+
+ All other exception types are wrapped in a new instance of
+ {@code ServiceStateException}.
+ @param text text to use if a new exception is created
+ @param fault exception or throwable
+ @return a {@link RuntimeException} to rethrow]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Exception that can be raised on state change operations, whose
+ exit code can be explicitly set, determined from that of any nested
+ cause, or a default value of
+ {@link  LauncherExitCodes#EXIT_SERVICE_LIFECYCLE_EXCEPTION}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.service.ServiceStateException -->
+  <!-- start class org.apache.hadoop.service.ServiceStateModel -->
+  <class name="ServiceStateModel" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ServiceStateModel" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create the service state model in the {@link Service.STATE#NOTINITED}
+ state.]]>
+      </doc>
+    </constructor>
+    <constructor name="ServiceStateModel" type="java.lang.String, org.apache.hadoop.service.Service.STATE"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a service state model instance in the chosen state
+ @param state the starting state]]>
+      </doc>
+    </constructor>
+    <method name="getState" return="org.apache.hadoop.service.Service.STATE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Query the service state. This is a non-blocking operation.
+ @return the state]]>
+      </doc>
+    </method>
+    <method name="isInState" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="proposed" type="org.apache.hadoop.service.Service.STATE"/>
+      <doc>
+      <![CDATA[Query that the state is in a specific state
+ @param proposed proposed new state
+ @return the state]]>
+      </doc>
+    </method>
+    <method name="ensureCurrentState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="expectedState" type="org.apache.hadoop.service.Service.STATE"/>
+      <doc>
+      <![CDATA[Verify that a service is in a given state.
+ @param expectedState the desired state
+ @throws ServiceStateException if the service state is different from
+ the desired state]]>
+      </doc>
+    </method>
+    <method name="enterState" return="org.apache.hadoop.service.Service.STATE"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="proposed" type="org.apache.hadoop.service.Service.STATE"/>
+      <doc>
+      <![CDATA[Enter a state -thread safe.
+
+ @param proposed proposed new state
+ @return the original state
+ @throws ServiceStateException if the transition is not permitted]]>
+      </doc>
+    </method>
+    <method name="checkStateTransition"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="state" type="org.apache.hadoop.service.Service.STATE"/>
+      <param name="proposed" type="org.apache.hadoop.service.Service.STATE"/>
+      <doc>
+      <![CDATA[Check that a state transition is valid and
+ throw an exception if not
+ @param name name of the service (can be null)
+ @param state current state
+ @param proposed proposed new state]]>
+      </doc>
+    </method>
+    <method name="isValidStateTransition" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="current" type="org.apache.hadoop.service.Service.STATE"/>
+      <param name="proposed" type="org.apache.hadoop.service.Service.STATE"/>
+      <doc>
+      <![CDATA[Is a state transition valid?
+ There are no checks for current==proposed
+ as that is considered a non-transition.
+
+ using an array kills off all branch misprediction costs, at the expense
+ of cache line misses.
+
+ @param current current state
+ @param proposed proposed new state
+ @return true if the transition to a new state is valid]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[return the state text as the toString() value
+ @return the current state's description]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Implements the service state model.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.service.ServiceStateModel -->
+</package>
+<package name="org.apache.hadoop.service.launcher">
+  <!-- start class org.apache.hadoop.service.launcher.AbstractLaunchableService -->
+  <class name="AbstractLaunchableService" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.service.launcher.LaunchableService"/>
+    <constructor name="AbstractLaunchableService" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct an instance with the given name.]]>
+      </doc>
+    </constructor>
+    <method name="bindArgs" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="args" type="java.util.List"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[{@inheritDoc}
+ <p>
+ The base implementation logs all arguments at the debug level,
+ then returns the passed in config unchanged.]]>
+      </doc>
+    </method>
+    <method name="execute" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[{@inheritDoc}
+ <p>
+ The action is to signal success by returning the exit code 0.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Subclass of {@link AbstractService} that provides basic implementations
+ of the {@link LaunchableService} methods.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.service.launcher.AbstractLaunchableService -->
+  <!-- start class org.apache.hadoop.service.launcher.HadoopUncaughtExceptionHandler -->
+  <class name="HadoopUncaughtExceptionHandler" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Thread.UncaughtExceptionHandler"/>
+    <constructor name="HadoopUncaughtExceptionHandler" type="java.lang.Thread.UncaughtExceptionHandler"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an instance delegating to the supplied handler if
+ the exception is considered "simple".
+ @param delegate a delegate exception handler.]]>
+      </doc>
+    </constructor>
+    <constructor name="HadoopUncaughtExceptionHandler"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Basic exception handler -logs simple exceptions, then continues.]]>
+      </doc>
+    </constructor>
+    <method name="uncaughtException"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="thread" type="java.lang.Thread"/>
+      <param name="exception" type="java.lang.Throwable"/>
+      <doc>
+      <![CDATA[Uncaught exception handler.
+ If an error is raised: shutdown
+ The state of the system is unknown at this point -attempting
+ a clean shutdown is dangerous. Instead: exit
+ @param thread thread that failed
+ @param exception the raised exception]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class is intended to be installed by calling 
+ {@link Thread#setDefaultUncaughtExceptionHandler(UncaughtExceptionHandler)}
+ in the main entry point. 
+
+ The base class will always attempt to shut down the process if an Error
+ was raised; the behavior on a standard Exception, raised outside 
+ process shutdown, is simply to log it. 
+
+ (Based on the class {@code YarnUncaughtExceptionHandler})]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.service.launcher.HadoopUncaughtExceptionHandler -->
+  <!-- start interface org.apache.hadoop.service.launcher.LaunchableService -->
+  <interface name="LaunchableService"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.service.Service"/>
+    <method name="bindArgs" return="org.apache.hadoop.conf.Configuration"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="args" type="java.util.List"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[Propagate the command line arguments.
+ <p>
+ This method is called before {@link #init(Configuration)};
+ Any non-null configuration that is returned from this operation
+ becomes the one that is passed on to that {@link #init(Configuration)}
+ operation.
+ <p>
+ This permits implementations to change the configuration before
+ the init operation. As the ServiceLauncher only creates
+ an instance of the base {@link Configuration} class, it is
+ recommended to instantiate any subclass (such as YarnConfiguration)
+ that injects new resources.
+ <p>
+ @param config the initial configuration build up by the
+ service launcher.
+ @param args list of arguments passed to the command line
+ after any launcher-specific commands have been stripped.
+ @return the configuration to init the service with.
+ Recommended: pass down the config parameter with any changes
+ @throws Exception any problem]]>
+      </doc>
+    </method>
+    <method name="execute" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[Run a service. This method is called after {@link Service#start()}.
+ <p>
+ The return value becomes the exit code of the launched process.
+ <p>
+ If an exception is raised, the policy is:
+ <ol>
+   <li>Any subset of {@link org.apache.hadoop.util.ExitUtil.ExitException}:
+   the exception is passed up unmodified.
+   </li>
+   <li>Any exception which implements
+   {@link org.apache.hadoop.util.ExitCodeProvider}:
+   A new {@link ServiceLaunchException} is created with the exit code
+   and message of the thrown exception; the thrown exception becomes the
+   cause.</li>
+   <li>Any other exception: a new {@link ServiceLaunchException} is created
+   with the exit code {@link LauncherExitCodes#EXIT_EXCEPTION_THROWN} and
+   the message of the original exception (which becomes the cause).</li>
+ </ol>
+ @return the exit code
+ @throws org.apache.hadoop.util.ExitUtil.ExitException an exception passed
+  up as the exit code and error text.
+ @throws Exception any exception to report. If it provides an exit code
+ this is used in a wrapping exception.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[An interface which services can implement to have their
+ execution managed by the ServiceLauncher.
+ <p>
+ The command line options will be passed down before the 
+ {@link Service#init(Configuration)} operation is invoked via an
+ invocation of {@link LaunchableService#bindArgs(Configuration, List)}
+ After the service has been successfully started via {@link Service#start()}
+ the {@link LaunchableService#execute()} method is called to execute the 
+ service. When this method returns, the service launcher will exit, using
+ the return code from the method as its exit option.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.service.launcher.LaunchableService -->
+  <!-- start interface org.apache.hadoop.service.launcher.LauncherExitCodes -->
+  <interface name="LauncherExitCodes"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <field name="EXIT_SUCCESS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Success: {@value}.]]>
+      </doc>
+    </field>
+    <field name="EXIT_FAIL" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Generic "false/fail" response: {@value}.
+ The operation worked but the result was not "true" from the viewpoint
+ of the executed code.]]>
+      </doc>
+    </field>
+    <field name="EXIT_CLIENT_INITIATED_SHUTDOWN" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Exit code when a client requested service termination: {@value}.]]>
+      </doc>
+    </field>
+    <field name="EXIT_TASK_LAUNCH_FAILURE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Exit code when targets could not be launched: {@value}.]]>
+      </doc>
+    </field>
+    <field name="EXIT_INTERRUPTED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Exit code when a control-C, kill -3, signal was picked up: {@value}.]]>
+      </doc>
+    </field>
+    <field name="EXIT_OTHER_FAILURE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Exit code when something happened but we can't be specific: {@value}.]]>
+      </doc>
+    </field>
+    <field name="EXIT_COMMAND_ARGUMENT_ERROR" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Exit code when the command line doesn't parse: {@value}, or
+ when it is otherwise invalid.
+ <p>
+ Approximate HTTP equivalent: {@code 400 Bad Request}]]>
+      </doc>
+    </field>
+    <field name="EXIT_UNAUTHORIZED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The request requires user authentication: {@value}.
+ <p>
+ Approximate HTTP equivalent: {@code 401 Unauthorized}]]>
+      </doc>
+    </field>
+    <field name="EXIT_USAGE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Exit code when a usage message was printed: {@value}.]]>
+      </doc>
+    </field>
+    <field name="EXIT_FORBIDDEN" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Forbidden action: {@value}.
+ <p>
+ Approximate HTTP equivalent: {@code 403: Forbidden}]]>
+      </doc>
+    </field>
+    <field name="EXIT_NOT_FOUND" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Something was not found: {@value}.
+ <p>
+ Approximate HTTP equivalent: {@code 404: Not Found}]]>
+      </doc>
+    </field>
+    <field name="EXIT_OPERATION_NOT_ALLOWED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The operation is not allowed: {@value}.
+ <p>
+ Approximate HTTP equivalent: {@code 405: Not allowed}]]>
+      </doc>
+    </field>
+    <field name="EXIT_NOT_ACCEPTABLE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The command is somehow not acceptable: {@value}.
+ <p>
+ Approximate HTTP equivalent: {@code 406: Not Acceptable}]]>
+      </doc>
+    </field>
+    <field name="EXIT_CONNECTIVITY_PROBLEM" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Exit code on connectivity problems: {@value}.
+ <p>
+ Approximate HTTP equivalent: {@code 408: Request Timeout}]]>
+      </doc>
+    </field>
+    <field name="EXIT_BAD_CONFIGURATION" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Exit code when the configurations are invalid/incomplete: {@value}.
+ <p>
+ Approximate HTTP equivalent: {@code 409: Conflict}]]>
+      </doc>
+    </field>
+    <field name="EXIT_EXCEPTION_THROWN" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Exit code when an exception was thrown from the service: {@value}.
+ <p>
+ Approximate HTTP equivalent: {@code 500 Internal Server Error}]]>
+      </doc>
+    </field>
+    <field name="EXIT_UNIMPLEMENTED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Unimplemented feature: {@value}.
+ <p>
+ Approximate HTTP equivalent: {@code 501: Not Implemented}]]>
+      </doc>
+    </field>
+    <field name="EXIT_SERVICE_UNAVAILABLE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Service Unavailable; it may be available later: {@value}.
+ <p>
+ Approximate HTTP equivalent: {@code 503 Service Unavailable}]]>
+      </doc>
+    </field>
+    <field name="EXIT_UNSUPPORTED_VERSION" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The application does not support, or refuses to support this
+ version: {@value}.
+ <p>
+ If raised, this is expected to be raised server-side and likely due
+ to client/server version incompatibilities.
+ <p>
+ Approximate HTTP equivalent: {@code 505: Version Not Supported}]]>
+      </doc>
+    </field>
+    <field name="EXIT_SERVICE_CREATION_FAILURE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The service instance could not be created: {@value}.]]>
+      </doc>
+    </field>
+    <field name="EXIT_SERVICE_LIFECYCLE_EXCEPTION" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The service raised an exception during its lifecycle: {@value}.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Common Exit codes.
+ <p>
+ Codes with a YARN prefix are YARN-related.
+ <p>
+ Many of the exit codes are designed to resemble HTTP error codes,
+ squashed into a single byte. e.g. 44, "not found" is the equivalent
+ of 404. The various 2XX HTTP error codes aren't followed;
+ the Unix standard of "0" for success is used.
+ <pre>
+    0-10: general command issues
+   30-39: equivalent to the 3XX responses, where those responses are
+          considered errors by the application.
+   40-49: client-side/CLI/config problems
+   50-59: service-side problems.
+   60+  : application specific error codes
+ </pre>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.service.launcher.LauncherExitCodes -->
+  <!-- start class org.apache.hadoop.service.launcher.ServiceLaunchException -->
+  <class name="ServiceLaunchException" extends="org.apache.hadoop.util.ExitUtil.ExitException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.util.ExitCodeProvider"/>
+    <implements name="org.apache.hadoop.service.launcher.LauncherExitCodes"/>
+    <constructor name="ServiceLaunchException" type="int, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an exception with the specific exit code.
+ @param exitCode exit code
+ @param cause cause of the exception]]>
+      </doc>
+    </constructor>
+    <constructor name="ServiceLaunchException" type="int, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an exception with the specific exit code and text.
+ @param exitCode exit code
+ @param message message to use in exception]]>
+      </doc>
+    </constructor>
+    <constructor name="ServiceLaunchException" type="int, java.lang.String, java.lang.Object[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a formatted exception.
+ <p>
+ This uses {@link String#format(String, Object...)}
+ to build the formatted exception in the ENGLISH locale.
+ <p>
+ If the last argument is a throwable, it becomes the cause of the exception.
+ It will also be used as a parameter for the format.
+ @param exitCode exit code
+ @param format format for message to use in exception
+ @param args list of arguments]]>
+      </doc>
+    </constructor>
+    <constructor name="ServiceLaunchException" type="int, java.lang.Throwable, java.lang.String, java.lang.Object[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a formatted exception.
+ <p>
+ This uses {@link String#format(String, Object...)}
+ to build the formatted exception in the ENGLISH locale.
+ @param exitCode exit code
+ @param cause inner cause
+ @param format format for message to use in exception
+ @param args list of arguments]]>
+      </doc>
+    </constructor>
+    <doc>
+    <![CDATA[A service launch exception that includes an exit code.
+ <p>
+ When caught by the ServiceLauncher, it will convert that
+ into a process exit code.
+ 
+ The {@link #ServiceLaunchException(int, String, Object...)} constructor
+ generates formatted exceptions.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.service.launcher.ServiceLaunchException -->
+</package>
+<package name="org.apache.hadoop.tools">
+</package>
+<package name="org.apache.hadoop.tools.protocolPB">
+</package>
+<package name="org.apache.hadoop.tracing">
+</package>
+<package name="org.apache.hadoop.util">
+  <!-- start class org.apache.hadoop.util.ApplicationClassLoader -->
+  <class name="ApplicationClassLoader" extends="java.net.URLClassLoader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationClassLoader" type="java.net.URL[], java.lang.ClassLoader, java.util.List"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ApplicationClassLoader" type="java.lang.String, java.lang.ClassLoader, java.util.List"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="MalformedURLException" type="java.net.MalformedURLException"/>
+    </constructor>
+    <method name="getResource" return="java.net.URL"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="loadClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+    </method>
+    <method name="loadClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="resolve" type="boolean"/>
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+    </method>
+    <method name="isSystemClass" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="systemClasses" type="java.util.List"/>
+      <doc>
+      <![CDATA[Checks if a class should be included as a system class.
+
+ A class is a system class if and only if it matches one of the positive
+ patterns and none of the negative ones.
+
+ @param name the class name to check
+ @param systemClasses a list of system class configurations.
+ @return true if the class is a system class]]>
+      </doc>
+    </method>
+    <field name="SYSTEM_CLASSES_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value of the system classes if the user did not override them.
+ JDK classes, hadoop classes and resources, and some select third-party
+ classes are considered system classes, and are not loaded by the
+ application classloader.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A {@link URLClassLoader} for application isolation. Classes from the
+ application JARs are loaded in preference to the parent loader.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.ApplicationClassLoader -->
+  <!-- start class org.apache.hadoop.util.DurationInfo -->
+  <class name="DurationInfo" extends="org.apache.hadoop.util.OperationDuration"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.AutoCloseable"/>
+    <constructor name="DurationInfo" type="org.slf4j.Logger, java.lang.String, java.lang.Object[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create the duration text from a {@code String.format()} code call;
+ log output at info level.
+ @param log log to write to
+ @param format format string
+ @param args list of arguments]]>
+      </doc>
+    </constructor>
+    <constructor name="DurationInfo" type="org.slf4j.Logger, boolean, java.lang.String, java.lang.Object[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create the duration text from a {@code String.format()} code call
+ and log either at info or debug.
+ @param log log to write to
+ @param logAtInfo should the log be at info, rather than debug
+ @param format format string
+ @param args list of arguments]]>
+      </doc>
+    </constructor>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A duration with logging of final state at info or debug
+ in the {@code close()} call.
+ This allows it to be used in a try-with-resources clause, and have the
+ duration automatically logged.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.DurationInfo -->
+  <!-- start interface org.apache.hadoop.util.IPList -->
+  <interface name="IPList"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="isIn" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ipAddress" type="java.lang.String"/>
+      <doc>
+      <![CDATA[returns true if the ipAddress is in the IPList.
+ @param ipAddress the IP address to check
+ @return boolean value indicating whether the ipAddress is in the IPList]]>
+      </doc>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.util.IPList -->
+  <!-- start class org.apache.hadoop.util.OperationDuration -->
+  <class name="OperationDuration" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="OperationDuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Instantiate.
+ The start time and finished time are both set
+ to the current clock time.]]>
+      </doc>
+    </constructor>
+    <method name="time" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Evaluate the system time.
+ @return the current clock time.]]>
+      </doc>
+    </method>
+    <method name="finished"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Update the finished time with the current system time.]]>
+      </doc>
+    </method>
+    <method name="getDurationString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the duration as {@link #humanTime(long)}.
+ @return a printable duration.]]>
+      </doc>
+    </method>
+    <method name="humanTime" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="time" type="long"/>
+      <doc>
+      <![CDATA[Convert to a human time of minutes:seconds.millis.
+ @param time time to humanize.
+ @return a printable value.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the duration as {@link #humanTime(long)}.
+ @return a printable duration.]]>
+      </doc>
+    </method>
+    <method name="value" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the duration in milliseconds.
+ <p></p>
+ This will be 0 until a call
+ to {@link #finished()} has been made.
+ @return the currently recorded duration.]]>
+      </doc>
+    </method>
+    <method name="asDuration" return="java.time.Duration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the duration of an operation as a java Duration
+ instance.
+ @return a duration.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Little duration counter.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.OperationDuration -->
+  <!-- start interface org.apache.hadoop.util.Progressable -->
+  <interface name="Progressable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="progress"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Report progress to the Hadoop framework.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A facility for reporting progress.
+ 
+ <p>Clients and/or applications can use the provided <code>Progressable</code>
+ to explicitly report progress to the Hadoop framework. This is especially
+ important for operations which take significant amount of time since,
+ in-lieu of the reported progress, the framework has to assume that an error
+ has occurred and time-out the operation.</p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.util.Progressable -->
+  <!-- start class org.apache.hadoop.util.PureJavaCrc32 -->
+  <class name="PureJavaCrc32" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.zip.Checksum"/>
+    <constructor name="PureJavaCrc32"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new PureJavaCrc32 object.]]>
+      </doc>
+    </constructor>
+    <method name="getValue" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="update"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="offset" type="int"/>
+      <param name="len" type="int"/>
+    </method>
+    <method name="update"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="int"/>
+    </method>
+    <doc>
+    <![CDATA[A pure-java implementation of the CRC32 checksum that uses
+ the same polynomial as the built-in native CRC32.
+
+ This is to avoid the JNI overhead for certain uses of Checksumming
+ where many small pieces of data are checksummed in succession.
+
+ The current version is ~10x to 1.8x as fast as Sun's native
+ java.util.zip.CRC32 in Java 1.6
+
+ @see java.util.zip.CRC32]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.PureJavaCrc32 -->
+  <!-- start class org.apache.hadoop.util.PureJavaCrc32C -->
+  <class name="PureJavaCrc32C" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.zip.Checksum"/>
+    <constructor name="PureJavaCrc32C"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new PureJavaCrc32 object.]]>
+      </doc>
+    </constructor>
+    <method name="getValue" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="update"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="off" type="int"/>
+      <param name="len" type="int"/>
+    </method>
+    <method name="update"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="b" type="int"/>
+    </method>
+    <doc>
+    <![CDATA[A pure-java implementation of the CRC32 checksum that uses
+ the CRC32-C polynomial, the same polynomial used by iSCSI
+ and implemented on many Intel chipsets supporting SSE4.2.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.PureJavaCrc32C -->
+  <!-- start class org.apache.hadoop.util.ReflectionUtils -->
+  <class name="ReflectionUtils" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReflectionUtils"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theObject" type="java.lang.Object"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Check and set 'configuration' if necessary.
+ 
+ @param theObject object for which to set configuration
+ @param conf Configuration]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Create an object for the given class and initialize it from conf
+ 
+ @param theClass class of which an object is created
+ @param conf Configuration
+ @return a new object]]>
+      </doc>
+    </method>
+    <method name="setContentionTracing"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="boolean"/>
+    </method>
+    <method name="printThreadInfo"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="stream" type="java.io.PrintStream"/>
+      <param name="title" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Print all of the thread's information and stack traces.
+ 
+ @param stream the stream to write to
+ @param title a string title for the stack trace]]>
+      </doc>
+    </method>
+    <method name="logThreadInfo"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="log" type="org.apache.commons.logging.Log"/>
+      <param name="title" type="java.lang.String"/>
+      <param name="minInterval" type="long"/>
+      <doc>
+      <![CDATA[Log the current thread stacks at INFO level.
+ @param log the logger that logs the stack trace
+ @param title a descriptive title for the call stacks
+ @param minInterval the minimum time since the last logging of the stacks
+      </doc>
+    </method>
+    <method name="logThreadInfo"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="log" type="org.slf4j.Logger"/>
+      <param name="title" type="java.lang.String"/>
+      <param name="minInterval" type="long"/>
+      <doc>
+      <![CDATA[Log the current thread stacks at INFO level.
+ @param log the logger that logs the stack trace
+ @param title a descriptive title for the call stacks
+ @param minInterval the minimum time since the last logging of the stacks
+      </doc>
+    </method>
+    <method name="getClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="T"/>
+      <doc>
+      <![CDATA[Return the correctly-typed {@link Class} of the given object.
+  
+ @param o object whose correctly-typed <code>Class</code> is to be obtained
+ @return the correctly typed <code>Class</code> of the given object.]]>
+      </doc>
+    </method>
+    <method name="copy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="src" type="T"/>
+      <param name="dst" type="T"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Make a copy of the writable object using serialization to a buffer
+ @param src the object to copy from
+ @param dst the object to copy into, which is destroyed
+ @return dst param (the copy)
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="cloneWritableInto"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dst" type="org.apache.hadoop.io.Writable"/>
+      <param name="src" type="org.apache.hadoop.io.Writable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getDeclaredFieldsIncludingInherited" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="clazz" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Gets all the declared fields of a class including fields declared in
+ superclasses.]]>
+      </doc>
+    </method>
+    <method name="getDeclaredMethodsIncludingInherited" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="clazz" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Gets all the declared methods of a class including methods declared in
+ superclasses.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[General reflection utils]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.ReflectionUtils -->
+  <!-- start class org.apache.hadoop.util.Shell -->
+  <class name="Shell" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Shell"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an instance with no minimum interval between runs; stderr is
+ not merged with stdout.]]>
+      </doc>
+    </constructor>
+    <constructor name="Shell" type="long"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an instance with a minimum interval between executions; stderr is
+ not merged with stdout.
+ @param interval interval in milliseconds between command executions.]]>
+      </doc>
+    </constructor>
+    <constructor name="Shell" type="long, boolean"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a shell instance which can be re-executed when the {@link #run()}
+ method is invoked with a given elapsed time between calls.
+
+ @param interval the minimum duration in milliseconds to wait before
+        re-executing the command. If set to 0, there is no minimum.
+ @param redirectErrorStream should the error stream be merged with
+        the normal output stream?]]>
+      </doc>
+    </constructor>
+    <method name="isJava7OrAbove" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="This call isn't needed any more: please remove uses of it.">
+      <doc>
+      <![CDATA[query to see if system is Java 7 or later.
+ Now that Hadoop requires Java 7 or later, this always returns true.
+ @deprecated This call isn't needed any more: please remove uses of it.
+ @return true, always.]]>
+      </doc>
+    </method>
+    <method name="isJavaVersionAtLeast" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="version" type="int"/>
+      <doc>
+      <![CDATA[Query to see if major version of Java specification of the system
+ is equal or greater than the parameter.
+
+ @param version 8, 9, 10 etc.
+ @return comparison with system property, always true for 8]]>
+      </doc>
+    </method>
+    <method name="checkWindowsCommandLineLength"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="commands" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Checks if a given command (String[]) fits in the Windows maximum command
+ line length Note that the input is expected to already include space
+ delimiters, no extra count will be added for delimiters.
+
+ @param commands command parts, including any space delimiters]]>
+      </doc>
+    </method>
+    <method name="getGroupsCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[a Unix command to get the current user's groups list.]]>
+      </doc>
+    </method>
+    <method name="getGroupsForUserCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <doc>
+      <![CDATA[A command to get a given user's groups list.
+ If the OS is not WINDOWS, the command will get the user's primary group
+ first and finally get the groups list which includes the primary group.
+ i.e. the user's primary group will be included twice.]]>
+      </doc>
+    </method>
+    <method name="getGroupsIDForUserCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <doc>
+      <![CDATA[A command to get a given user's group id list.
+ The command will get the user's primary group
+ first and finally get the groups list which includes the primary group.
+ i.e. the user's primary group will be included twice.
+ This command does not support Windows and will only return group names.]]>
+      </doc>
+    </method>
+    <method name="getUsersForNetgroupCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="netgroup" type="java.lang.String"/>
+      <doc>
+      <![CDATA[A command to get a given netgroup's user list.]]>
+      </doc>
+    </method>
+    <method name="getGetPermissionCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return a command to get permission information.]]>
+      </doc>
+    </method>
+    <method name="getSetPermissionCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="perm" type="java.lang.String"/>
+      <param name="recursive" type="boolean"/>
+      <doc>
+      <![CDATA[Return a command to set permission.]]>
+      </doc>
+    </method>
+    <method name="getSetPermissionCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="perm" type="java.lang.String"/>
+      <param name="recursive" type="boolean"/>
+      <param name="file" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Return a command to set permission for specific file.
+
+ @param perm String permission to set
+ @param recursive boolean true to apply to all sub-directories recursively
+ @param file String file to set
+ @return String[] containing command and arguments]]>
+      </doc>
+    </method>
+    <method name="getSetOwnerCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="owner" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Return a command to set owner.]]>
+      </doc>
+    </method>
+    <method name="getSymlinkCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="target" type="java.lang.String"/>
+      <param name="link" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Return a command to create symbolic links.]]>
+      </doc>
+    </method>
+    <method name="getReadlinkCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="link" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Return a command to read the target of a symbolic link.]]>
+      </doc>
+    </method>
+    <method name="getCheckProcessIsAliveCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pid" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Return a command for determining if process with specified pid is alive.
+ @param pid process ID
+ @return a <code>kill -0</code> command or equivalent]]>
+      </doc>
+    </method>
+    <method name="getSignalKillCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="code" type="int"/>
+      <param name="pid" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Return a command to send a signal to a given pid.]]>
+      </doc>
+    </method>
+    <method name="getEnvironmentVariableRegex" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return a regular expression string that match environment variables.]]>
+      </doc>
+    </method>
+    <method name="appendScriptExtension" return="java.io.File"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="parent" type="java.io.File"/>
+      <param name="basename" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns a File referencing a script with the given basename, inside the
+ given parent directory.  The file extension is inferred by platform:
+ <code>".cmd"</code> on Windows, or <code>".sh"</code> otherwise.
+
+ @param parent File parent directory
+ @param basename String script file basename
+ @return File referencing the script in the directory]]>
+      </doc>
+    </method>
+    <method name="appendScriptExtension" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="basename" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns a script file name with the given basename.
+
+ The file extension is inferred by platform:
+ <code>".cmd"</code> on Windows, or <code>".sh"</code> otherwise.
+
+ @param basename String script file basename
+ @return String script file name]]>
+      </doc>
+    </method>
+    <method name="getRunScriptCommand" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="script" type="java.io.File"/>
+      <doc>
+      <![CDATA[Returns a command to run the given script.  The script interpreter is
+ inferred by platform: cmd on Windows or bash otherwise.
+
+ @param script File script to run
+ @return String[] command to run the script]]>
+      </doc>
+    </method>
+    <method name="getHadoopHome" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the Hadoop home directory. Raises an exception if not found
+ @return the home dir
+ @throws IOException if the home directory cannot be located.]]>
+      </doc>
+    </method>
+    <method name="getQualifiedBin" return="java.io.File"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="executable" type="java.lang.String"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <doc>
+      <![CDATA[Fully qualify the path to a binary that should be in a known hadoop
+  bin location. This is primarily useful for disambiguating call-outs
+  to executable sub-components of Hadoop to avoid clashes with other
+  executables that may be in the path.  Caveat:  this call doesn't
+  just format the path to the bin directory.  It also checks for file
+  existence of the composed path. The output of this call should be
+  cached by callers.
+
+ @param executable executable
+ @return executable file reference
+ @throws FileNotFoundException if the path does not exist]]>
+      </doc>
+    </method>
+    <method name="getQualifiedBinPath" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="executable" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Fully qualify the path to a binary that should be in a known hadoop
+  bin location. This is primarily useful for disambiguating call-outs
+  to executable sub-components of Hadoop to avoid clashes with other
+  executables that may be in the path.  Caveat:  this call doesn't
+  just format the path to the bin directory.  It also checks for file
+  existence of the composed path. The output of this call should be
+  cached by callers.
+
+ @param executable executable
+ @return executable file reference
+ @throws FileNotFoundException if the path does not exist
+ @throws IOException on path canonicalization failures]]>
+      </doc>
+    </method>
+    <method name="hasWinutilsPath" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Predicate to indicate whether or not the path to winutils is known.
+
+ If true, then {@link #WINUTILS} is non-null, and both
+ {@link #getWinUtilsPath()} and {@link #getWinUtilsFile()}
+ will successfully return this value. Always false on non-windows systems.
+ @return true if there is a valid path to the binary]]>
+      </doc>
+    </method>
+    <method name="getWinUtilsPath" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Locate the winutils binary, or fail with a meaningful
+ exception and stack trace as an RTE.
+ This method is for use in methods which don't explicitly throw
+ an <code>IOException</code>.
+ @return the path to {@link #WINUTILS_EXE}
+ @throws RuntimeException if the path is not resolvable]]>
+      </doc>
+    </method>
+    <method name="getWinUtilsFile" return="java.io.File"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <doc>
+      <![CDATA[Get a file reference to winutils.
+ Always raises an exception if there isn't one
+ @return the file instance referring to the winutils bin.
+ @throws FileNotFoundException on any failure to locate that file.]]>
+      </doc>
+    </method>
+    <method name="checkIsBashSupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="InterruptedIOException" type="java.io.InterruptedIOException"/>
+    </method>
+    <method name="setEnvironment"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="env" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set the environment for the command.
+ @param env Mapping of environment variables]]>
+      </doc>
+    </method>
+    <method name="setWorkingDirectory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="dir" type="java.io.File"/>
+      <doc>
+      <![CDATA[Set the working directory.
+ @param dir The directory where the command will be executed]]>
+      </doc>
+    </method>
+    <method name="run"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Check to see if a command needs to be executed and execute if needed.]]>
+      </doc>
+    </method>
+    <method name="getExecString" return="java.lang.String[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[return an array containing the command name and its parameters.]]>
+      </doc>
+    </method>
+    <method name="parseExecResult"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="lines" type="java.io.BufferedReader"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Parse the execution result]]>
+      </doc>
+    </method>
+    <method name="getEnvironment" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="env" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get an environment variable.
+ @param env the environment var
+ @return the value or null if it was unset.]]>
+      </doc>
+    </method>
+    <method name="getProcess" return="java.lang.Process"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[get the current sub-process executing the given command.
+ @return process executing the command]]>
+      </doc>
+    </method>
+    <method name="getExitCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[get the exit code.
+ @return the exit code of the process]]>
+      </doc>
+    </method>
+    <method name="getWaitingThread" return="java.lang.Thread"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[get the thread that is waiting on this instance of <code>Shell</code>.
+ @return the thread that ran runCommand() that spawned this shell
+ or null if no thread is waiting for this shell to complete]]>
+      </doc>
+    </method>
+    <method name="isTimedOut" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Check whether the script passed to the shell command executor timed
+ out.
+
+ @return if the script timed out.]]>
+      </doc>
+    </method>
+    <method name="execCommand" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cmd" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Static method to execute a shell command.
+ Covers most of the simple cases without requiring the user to implement
+ the <code>Shell</code> interface.
+ @param cmd shell command to execute.
+ @return the output of the executed command.]]>
+      </doc>
+    </method>
+    <method name="execCommand" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="env" type="java.util.Map"/>
+      <param name="cmd" type="java.lang.String[]"/>
+      <param name="timeout" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Static method to execute a shell command.
+ Covers most of the simple cases without requiring the user to implement
+ the <code>Shell</code> interface.
+ @param env the map of environment key=value
+ @param cmd shell command to execute.
+ @param timeout time in milliseconds after which script should be marked timeout
+ @return the output of the executed command.
+ @throws IOException on any problem.]]>
+      </doc>
+    </method>
+    <method name="execCommand" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="env" type="java.util.Map"/>
+      <param name="cmd" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Static method to execute a shell command.
+ Covers most of the simple cases without requiring the user to implement
+ the <code>Shell</code> interface.
+ @param env the map of environment key=value
+ @param cmd shell command to execute.
+ @return the output of the executed command.
+ @throws IOException on any problem.]]>
+      </doc>
+    </method>
+    <method name="destroyAllShellProcesses"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Static method to destroy all running <code>Shell</code> processes.
+ Iterates through a map of all currently running <code>Shell</code>
+ processes and destroys them one by one. This method is thread safe]]>
+      </doc>
+    </method>
+    <method name="getAllShells" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Static method to return a Set of all <code>Shell</code> objects.]]>
+      </doc>
+    </method>
+    <method name="getMemlockLimit" return="java.lang.Long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ulimit" type="java.lang.Long"/>
+      <doc>
+      <![CDATA[Static method to return the memory lock limit for datanode.
+ @param ulimit max value at which memory locked should be capped.
+ @return long value specifying the memory lock limit.]]>
+      </doc>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SYSPROP_HADOOP_HOME_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[System property for the Hadoop home directory: {@value}.]]>
+      </doc>
+    </field>
+    <field name="ENV_HADOOP_HOME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Environment variable for Hadoop's home dir: {@value}.]]>
+      </doc>
+    </field>
+    <field name="WINDOWS_MAX_SHELL_LENGTH" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Maximum command line length in Windows.
+ KB830473 documents this as 8191.]]>
+      </doc>
+    </field>
+    <field name="WINDOWS_MAX_SHELL_LENGHT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="use the correctly spelled constant.">
+      <doc>
+      <![CDATA[mis-spelling of {@link #WINDOWS_MAX_SHELL_LENGTH}.
+ @deprecated use the correctly spelled constant.]]>
+      </doc>
+    </field>
+    <field name="USER_NAME_COMMAND" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[a Unix command to get the current user's name: {@value}.]]>
+      </doc>
+    </field>
+    <field name="WindowsProcessLaunchLock" type="java.lang.Object"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Windows <code>CreateProcess</code> synchronization object.]]>
+      </doc>
+    </field>
+    <field name="osType" type="org.apache.hadoop.util.Shell.OSType"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the type of the operating system, as determined from parsing
+ the <code>os.name</code> property.]]>
+      </doc>
+    </field>
+    <field name="WINDOWS" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SOLARIS" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAC" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FREEBSD" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LINUX" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="OTHER" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="PPC_64" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ENV_NAME_REGEX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Regular expression for environment variables: {@value}.]]>
+      </doc>
+    </field>
+    <field name="SET_PERMISSION_COMMAND" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[a Unix command to set permission: {@value}.]]>
+      </doc>
+    </field>
+    <field name="SET_OWNER_COMMAND" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[a Unix command to set owner: {@value}.]]>
+      </doc>
+    </field>
+    <field name="SET_GROUP_COMMAND" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[a Unix command to set the change user's groups list: {@value}.]]>
+      </doc>
+    </field>
+    <field name="LINK_COMMAND" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[a Unix command to create a link: {@value}.]]>
+      </doc>
+    </field>
+    <field name="READ_LINK_COMMAND" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[a Unix command to get a link target: {@value}.]]>
+      </doc>
+    </field>
+    <field name="timeOutInterval" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Time after which the executing script would be timedout.]]>
+      </doc>
+    </field>
+    <field name="inheritParentEnv" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Indicates if the parent env vars should be inherited or not.]]>
+      </doc>
+    </field>
+    <field name="WINUTILS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="use one of the exception-raising getter methods,
+ specifically {@link #getWinUtilsPath()} or {@link #getWinUtilsFile()}">
+      <doc>
+      <![CDATA[Location of winutils as a string; null if not found.
+ <p>
+ <i>Important: caller must check for this value being null</i>.
+ The lack of such checks has led to many support issues being raised.
+ <p>
+ @deprecated use one of the exception-raising getter methods,
+ specifically {@link #getWinUtilsPath()} or {@link #getWinUtilsFile()}]]>
+      </doc>
+    </field>
+    <field name="isSetsidAvailable" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Flag which is true if setsid exists.]]>
+      </doc>
+    </field>
+    <field name="TOKEN_SEPARATOR_REGEX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Token separator regex used to parse Shell tool outputs.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A base class for running a Shell command.
+
+ <code>Shell</code> can be used to run shell commands like <code>du</code> or
+ <code>df</code>. It also offers facilities to gate commands by
+ time-intervals.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.Shell -->
+  <!-- start class org.apache.hadoop.util.ShutdownHookManager -->
+  <class name="ShutdownHookManager" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="get" return="org.apache.hadoop.util.ShutdownHookManager"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return <code>ShutdownHookManager</code> singleton.
+
+ @return <code>ShutdownHookManager</code> singleton.]]>
+      </doc>
+    </method>
+    <method name="addShutdownHook"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="shutdownHook" type="java.lang.Runnable"/>
+      <param name="priority" type="int"/>
+      <doc>
+      <![CDATA[Adds a shutdownHook with a priority, the higher the priority
+ the earlier will run. ShutdownHooks with same priority run
+ in a non-deterministic order.
+
+ @param shutdownHook shutdownHook <code>Runnable</code>
+ @param priority priority of the shutdownHook.]]>
+      </doc>
+    </method>
+    <method name="addShutdownHook"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="shutdownHook" type="java.lang.Runnable"/>
+      <param name="priority" type="int"/>
+      <param name="timeout" type="long"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+      <doc>
+      <![CDATA[Adds a shutdownHook with a priority and timeout the higher the priority
+ the earlier will run. ShutdownHooks with same priority run
+ in a non-deterministic order. The shutdown hook will be terminated if it
+ has not been finished in the specified period of time.
+
+ @param shutdownHook shutdownHook <code>Runnable</code>
+ @param priority priority of the shutdownHook
+ @param timeout timeout of the shutdownHook
+ @param unit unit of the timeout <code>TimeUnit</code>]]>
+      </doc>
+    </method>
+    <method name="removeShutdownHook" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="shutdownHook" type="java.lang.Runnable"/>
+      <doc>
+      <![CDATA[Removes a shutdownHook.
+
+ @param shutdownHook shutdownHook to remove.
+ @return TRUE if the shutdownHook was registered and removed,
+ FALSE otherwise.]]>
+      </doc>
+    </method>
+    <method name="hasShutdownHook" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="shutdownHook" type="java.lang.Runnable"/>
+      <doc>
+      <![CDATA[Indicates if a shutdownHook is registered or not.
+
+ @param shutdownHook shutdownHook to check if registered.
+ @return TRUE/FALSE depending if the shutdownHook is registered.]]>
+      </doc>
+    </method>
+    <method name="isShutdownInProgress" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Indicates if shutdown is in progress or not.
+ 
+ @return TRUE if the shutdown is in progress, otherwise FALSE.]]>
+      </doc>
+    </method>
+    <method name="clearShutdownHooks"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[clear all registered shutdownHooks.]]>
+      </doc>
+    </method>
+    <field name="TIMEOUT_MINIMUM" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Minimum shutdown timeout: {@value} second(s).]]>
+      </doc>
+    </field>
+    <field name="TIME_UNIT_DEFAULT" type="java.util.concurrent.TimeUnit"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default time unit used: seconds.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[The <code>ShutdownHookManager</code> enables running shutdownHook
+ in a deterministic order, higher priority first.
+ <p>
+ The JVM runs ShutdownHooks in a non-deterministic order or in parallel.
+ This class registers a single JVM shutdownHook and run all the
+ shutdownHooks registered to it (to this class) in order based on their
+ priority.
+
+ Unless a hook was registered with a shutdown explicitly set through
+ {@link #addShutdownHook(Runnable, int, long, TimeUnit)},
+ the shutdown time allocated to it is set by the configuration option
+ {@link CommonConfigurationKeysPublic#SERVICE_SHUTDOWN_TIMEOUT} in
+ {@code core-site.xml}, with a default value of
+ {@link CommonConfigurationKeysPublic#SERVICE_SHUTDOWN_TIMEOUT_DEFAULT}
+ seconds.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.ShutdownHookManager -->
+  <!-- start class org.apache.hadoop.util.StringInterner -->
+  <class name="StringInterner" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StringInterner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="strongIntern" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="sample" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Interns and returns a reference to the representative instance 
+ for any of a collection of string instances that are equal to each other.
+ Retains strong reference to the instance, 
+ thus preventing it from being garbage-collected. 
+ 
+ @param sample string instance to be interned
+ @return strong reference to interned string instance]]>
+      </doc>
+    </method>
+    <method name="weakIntern" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="sample" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Interns and returns a reference to the representative instance 
+ for any of a collection of string instances that are equal to each other.
+ Retains weak reference to the instance, 
+ and so does not prevent it from being garbage-collected.
+ 
+ @param sample string instance to be interned
+ @return weak reference to interned string instance]]>
+      </doc>
+    </method>
+    <method name="internStringsInArray" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="strings" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Interns all the strings in the given array in place,
+ returning the same array.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Provides string interning utility methods. For weak interning,
+ we use the standard String.intern() call, that performs very well
+ (no problems with PermGen overflowing, etc.) starting from JDK 7.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.StringInterner -->
+  <!-- start class org.apache.hadoop.util.SysInfo -->
+  <class name="SysInfo" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SysInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.util.SysInfo"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return default OS instance.
+ @throws UnsupportedOperationException If cannot determine OS.
+ @return Default instance for the detected OS.]]>
+      </doc>
+    </method>
+    <method name="getVirtualMemorySize" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the total size of the virtual memory present in the system.
+
+ @return virtual memory size in bytes.]]>
+      </doc>
+    </method>
+    <method name="getPhysicalMemorySize" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the total size of the physical memory present in the system.
+
+ @return physical memory size bytes.]]>
+      </doc>
+    </method>
+    <method name="getAvailableVirtualMemorySize" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the total size of the available virtual memory present
+ in the system.
+
+ @return available virtual memory size in bytes.]]>
+      </doc>
+    </method>
+    <method name="getAvailablePhysicalMemorySize" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the total size of the available physical memory present
+ in the system.
+
+ @return available physical memory size bytes.]]>
+      </doc>
+    </method>
+    <method name="getNumProcessors" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the total number of logical processors present on the system.
+
+ @return number of logical processors]]>
+      </doc>
+    </method>
+    <method name="getNumCores" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain total number of physical cores present on the system.
+
+ @return number of physical cores]]>
+      </doc>
+    </method>
+    <method name="getCpuFrequency" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the CPU frequency of on the system.
+
+ @return CPU frequency in kHz]]>
+      </doc>
+    </method>
+    <method name="getCumulativeCpuTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the cumulative CPU time since the system is on.
+
+ @return cumulative CPU time in milliseconds]]>
+      </doc>
+    </method>
+    <method name="getCpuUsagePercentage" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the CPU usage % of the machine. Return -1 if it is unavailable
+
+ @return CPU usage as a percentage (from 0 to 100) of available cycles.]]>
+      </doc>
+    </method>
+    <method name="getNumVCoresUsed" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the number of VCores used. Return -1 if it is unavailable
+
+ @return Number of VCores used a percentage (from 0 to #VCores).]]>
+      </doc>
+    </method>
+    <method name="getNetworkBytesRead" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the aggregated number of bytes read over the network.
+ @return total number of bytes read.]]>
+      </doc>
+    </method>
+    <method name="getNetworkBytesWritten" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the aggregated number of bytes written to the network.
+ @return total number of bytes written.]]>
+      </doc>
+    </method>
+    <method name="getStorageBytesRead" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the aggregated number of bytes read from disks.
+
+ @return total number of bytes read.]]>
+      </doc>
+    </method>
+    <method name="getStorageBytesWritten" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain the aggregated number of bytes written to disks.
+
+ @return total number of bytes written.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Plugin to calculate resource information on the system.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.SysInfo -->
+  <!-- start interface org.apache.hadoop.util.Tool -->
+  <interface name="Tool"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <method name="run" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[Execute the command with the given arguments.
+ 
+ @param args command specific arguments.
+ @return exit code.
+ @throws Exception]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A tool interface that supports handling of generic command-line options.
+ 
+ <p><code>Tool</code>, is the standard for any Map-Reduce tool/application. 
+ The tool/application should delegate the handling of 
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/CommandsManual.html#Generic_Options">
+ standard command-line options</a> to {@link ToolRunner#run(Tool, String[])} 
+ and only handle its custom arguments.</p>
+ 
+ <p>Here is how a typical <code>Tool</code> is implemented:</p>
+ <p><blockquote><pre>
+     public class MyApp extends Configured implements Tool {
+     
+       public int run(String[] args) throws Exception {
+         // <code>Configuration</code> processed by <code>ToolRunner</code>
+         Configuration conf = getConf();
+         
+         // Create a JobConf using the processed <code>conf</code>
+         JobConf job = new JobConf(conf, MyApp.class);
+         
+         // Process custom command-line options
+         Path in = new Path(args[1]);
+         Path out = new Path(args[2]);
+         
+         // Specify various job-specific parameters     
+         job.setJobName("my-app");
+         job.setInputPath(in);
+         job.setOutputPath(out);
+         job.setMapperClass(MyMapper.class);
+         job.setReducerClass(MyReducer.class);
+
+         // Submit the job, then poll for progress until the job is complete
+         RunningJob runningJob = JobClient.runJob(job);
+         if (runningJob.isSuccessful()) {
+           return 0;
+         } else {
+           return 1;
+         }
+       }
+       
+       public static void main(String[] args) throws Exception {
+         // Let <code>ToolRunner</code> handle generic command-line options 
+         int res = ToolRunner.run(new Configuration(), new MyApp(), args);
+         
+         System.exit(res);
+       }
+     }
+ </pre></blockquote><p>
+ 
+ @see GenericOptionsParser
+ @see ToolRunner]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.util.Tool -->
+  <!-- start class org.apache.hadoop.util.ToolRunner -->
+  <class name="ToolRunner" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ToolRunner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="run" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="tool" type="org.apache.hadoop.util.Tool"/>
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[Runs the given <code>Tool</code> by {@link Tool#run(String[])}, after 
+ parsing with the given generic arguments. Uses the given 
+ <code>Configuration</code>, or builds one if null.
+ 
+ Sets the <code>Tool</code>'s configuration with the possibly modified 
+ version of the <code>conf</code>.  
+ 
+ @param conf <code>Configuration</code> for the <code>Tool</code>.
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+      </doc>
+    </method>
+    <method name="run" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tool" type="org.apache.hadoop.util.Tool"/>
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[Runs the <code>Tool</code> with its <code>Configuration</code>.
+ 
+ Equivalent to <code>run(tool.getConf(), tool, args)</code>.
+ 
+ @param tool <code>Tool</code> to run.
+ @param args command-line arguments to the tool.
+ @return exit code of the {@link Tool#run(String[])} method.]]>
+      </doc>
+    </method>
+    <method name="printGenericCommandUsage"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.PrintStream"/>
+      <doc>
+      <![CDATA[Prints generic command-line argurments and usage information.
+ 
+  @param out stream to write usage information to.]]>
+      </doc>
+    </method>
+    <method name="confirmPrompt" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="prompt" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Print out a prompt to the user, and return true if the user
+ responds with "y" or "yes". (case insensitive)]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A utility to help run {@link Tool}s.
+ 
+ <p><code>ToolRunner</code> can be used to run classes implementing 
+ <code>Tool</code> interface. It works in conjunction with 
+ {@link GenericOptionsParser} to parse the 
+ <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/CommandsManual.html#Generic_Options">
+ generic hadoop command line arguments</a> and modifies the 
+ <code>Configuration</code> of the <code>Tool</code>. The 
+ application-specific options are passed along without being modified.
+ </p>
+ 
+ @see Tool
+ @see GenericOptionsParser]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.ToolRunner -->
+  <!-- start class org.apache.hadoop.util.VersionInfo -->
+  <class name="VersionInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="VersionInfo" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="_getVersion" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="_getRevision" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="_getBranch" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="_getDate" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="_getUser" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="_getUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="_getSrcChecksum" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="_getBuildVersion" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="_getProtocVersion" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="getVersion" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the Hadoop version.
+ @return the Hadoop version string, eg. "0.6.3-dev"]]>
+      </doc>
+    </method>
+    <method name="getRevision" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the Git commit hash of the repository when compiled.
+ @return the commit hash, eg. "18f64065d5db6208daf50b02c1b5ed4ee3ce547a"]]>
+      </doc>
+    </method>
+    <method name="getBranch" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the branch on which this originated.
+ @return The branch name, e.g. "trunk" or "branches/branch-0.20"]]>
+      </doc>
+    </method>
+    <method name="getDate" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The date that Hadoop was compiled.
+ @return the compilation date in unix date format]]>
+      </doc>
+    </method>
+    <method name="getUser" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The user that compiled Hadoop.
+ @return the username of the user]]>
+      </doc>
+    </method>
+    <method name="getUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the URL for the Hadoop repository.
+ @return the URL of the Hadoop repository]]>
+      </doc>
+    </method>
+    <method name="getSrcChecksum" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the checksum of the source files from which Hadoop was built.
+ @return the checksum of the source files]]>
+      </doc>
+    </method>
+    <method name="getBuildVersion" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the buildVersion which includes version,
+ revision, user and date.
+ @return the buildVersion]]>
+      </doc>
+    </method>
+    <method name="getProtocVersion" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the protoc version used for the build.
+ @return the protoc version]]>
+      </doc>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+    </method>
+    <doc>
+    <![CDATA[This class returns build information about Hadoop components.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.VersionInfo -->
+  <doc>
+  <![CDATA[Common utilities.]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.util.bloom">
+  <!-- start class org.apache.hadoop.util.bloom.BloomFilter -->
+  <class name="BloomFilter" extends="org.apache.hadoop.util.bloom.Filter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BloomFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor - use with readFields]]>
+      </doc>
+    </constructor>
+    <constructor name="BloomFilter" type="int, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor
+ @param vectorSize The vector size of <i>this</i> filter.
+ @param nbHash The number of hash function to consider.
+ @param hashType type of the hashing function (see
+ {@link org.apache.hadoop.util.hash.Hash}).]]>
+      </doc>
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+    </method>
+    <method name="and"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+    </method>
+    <method name="membershipTest" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+    </method>
+    <method name="not"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="or"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+    </method>
+    <method name="xor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getVectorSize" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return size of the the bloomfilter]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Implements a <i>Bloom filter</i>, as defined by Bloom in 1970.
+ <p>
+ The Bloom filter is a data structure that was introduced in 1970 and that has been adopted by 
+ the networking research community in the past decade thanks to the bandwidth efficiencies that it
+ offers for the transmission of set membership information between networked hosts.  A sender encodes 
+ the information into a bit vector, the Bloom filter, that is more compact than a conventional 
+ representation. Computation and space costs for construction are linear in the number of elements.  
+ The receiver uses the filter to test whether various elements are members of the set. Though the 
+ filter will occasionally return a false positive, it will never return a false negative. When creating 
+ the filter, the sender can choose its desired point in a trade-off between the false positive rate and the size. 
+ 
+ <p>
+ Originally created by
+ <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
+ 
+ @see Filter The general behavior of a filter
+ 
+ @see <a href="http://portal.acm.org/citation.cfm?id=362692&dl=ACM&coll=portal">Space/Time Trade-Offs in Hash Coding with Allowable Errors</a>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.bloom.BloomFilter -->
+  <!-- start class org.apache.hadoop.util.bloom.CountingBloomFilter -->
+  <class name="CountingBloomFilter" extends="org.apache.hadoop.util.bloom.Filter"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CountingBloomFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor - use with readFields]]>
+      </doc>
+    </constructor>
+    <constructor name="CountingBloomFilter" type="int, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor
+ @param vectorSize The vector size of <i>this</i> filter.
+ @param nbHash The number of hash function to consider.
+ @param hashType type of the hashing function (see
+ {@link org.apache.hadoop.util.hash.Hash}).]]>
+      </doc>
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+    </method>
+    <method name="delete"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+      <doc>
+      <![CDATA[Removes a specified key from <i>this</i> counting Bloom filter.
+ <p>
+ <b>Invariant</b>: nothing happens if the specified key does not belong to <i>this</i> counter Bloom filter.
+ @param key The key to remove.]]>
+      </doc>
+    </method>
+    <method name="and"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+    </method>
+    <method name="membershipTest" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+    </method>
+    <method name="approximateCount" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+      <doc>
+      <![CDATA[This method calculates an approximate count of the key, i.e. how many
+ times the key was added to the filter. This allows the filter to be
+ used as an approximate <code>key -&gt; count</code> map.
+ <p>NOTE: due to the bucket size of this filter, inserting the same
+ key more than 15 times will cause an overflow at all filter positions
+ associated with this key, and it will significantly increase the error
+ rate for this and other keys. For this reason the filter can only be
+ used to store small count values <code>0 &lt;= N &lt;&lt; 15</code>.
+ @param key key to be tested
+ @return 0 if the key is not present. Otherwise, a positive value v will
+ be returned such that <code>v == count</code> with probability equal to the
+ error rate of this filter, and <code>v &gt; count</code> otherwise.
+ Additionally, if the filter experienced an underflow as a result of
+ {@link #delete(Key)} operation, the return value may be lower than the
+ <code>count</code> with the probability of the false negative rate of such
+ filter.]]>
+      </doc>
+    </method>
+    <method name="not"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="or"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+    </method>
+    <method name="xor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Implements a <i>counting Bloom filter</i>, as defined by Fan et al. in a ToN
+ 2000 paper.
+ <p>
+ A counting Bloom filter is an improvement to standard a Bloom filter as it
+ allows dynamic additions and deletions of set membership information.  This 
+ is achieved through the use of a counting vector instead of a bit vector.
+ <p>
+ Originally created by
+ <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
+
+ @see Filter The general behavior of a filter
+ 
+ @see <a href="http://portal.acm.org/citation.cfm?id=343571.343572">Summary cache: a scalable wide-area web cache sharing protocol</a>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.bloom.CountingBloomFilter -->
+  <!-- start class org.apache.hadoop.util.bloom.DynamicBloomFilter -->
+  <class name="DynamicBloomFilter" extends="org.apache.hadoop.util.bloom.Filter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="DynamicBloomFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Zero-args constructor for the serialization.]]>
+      </doc>
+    </constructor>
+    <constructor name="DynamicBloomFilter" type="int, int, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor.
+ <p>
+ Builds an empty Dynamic Bloom filter.
+ @param vectorSize The number of bits in the vector.
+ @param nbHash The number of hash function to consider.
+ @param hashType type of the hashing function (see
+ {@link org.apache.hadoop.util.hash.Hash}).
+ @param nr The threshold for the maximum number of keys to record in a
+ dynamic Bloom filter row.]]>
+      </doc>
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+    </method>
+    <method name="and"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+    </method>
+    <method name="membershipTest" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+    </method>
+    <method name="not"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="or"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+    </method>
+    <method name="xor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filter" type="org.apache.hadoop.util.bloom.Filter"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Implements a <i>dynamic Bloom filter</i>, as defined in the INFOCOM 2006 paper.
+ <p>
+ A dynamic Bloom filter (DBF) makes use of a <code>s * m</code> bit matrix but
+ each of the <code>s</code> rows is a standard Bloom filter. The creation 
+ process of a DBF is iterative. At the start, the DBF is a <code>1 * m</code>
+ bit matrix, i.e., it is composed of a single standard Bloom filter.
+ It assumes that <code>n<sub>r</sub></code> elements are recorded in the 
+ initial bit vector, where <code>n<sub>r</sub> {@literal <=} n</code>
+ (<code>n</code> is the cardinality of the set <code>A</code> to record in
+ the filter).
+ <p>
+ As the size of <code>A</code> grows during the execution of the application,
+ several keys must be inserted in the DBF.  When inserting a key into the DBF,
+ one must first get an active Bloom filter in the matrix.  A Bloom filter is
+ active when the number of recorded keys, <code>n<sub>r</sub></code>, is 
+ strictly less than the current cardinality of <code>A</code>, <code>n</code>.
+ If an active Bloom filter is found, the key is inserted and 
+ <code>n<sub>r</sub></code> is incremented by one. On the other hand, if there
+ is no active Bloom filter, a new one is created (i.e., a new row is added to
+ the matrix) according to the current size of <code>A</code> and the element
+ is added in this new Bloom filter and the <code>n<sub>r</sub></code> value of
+ this new Bloom filter is set to one.  A given key is said to belong to the
+ DBF if the <code>k</code> positions are set to one in one of the matrix rows.
+ <p>
+ Originally created by
+ <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
+
+ @see Filter The general behavior of a filter
+ @see BloomFilter A Bloom filter
+ 
+ @see <a href="http://www.cse.fau.edu/~jie/research/publications/Publication_files/infocom2006.pdf">Theory and Network Applications of Dynamic Bloom Filters</a>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.bloom.DynamicBloomFilter -->
+  <!-- start class org.apache.hadoop.util.bloom.HashFunction -->
+  <class name="HashFunction" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="HashFunction" type="int, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor.
+ <p>
+ Builds a hash function that must obey to a given maximum number of returned values and a highest value.
+ @param maxValue The maximum highest returned value.
+ @param nbHash The number of resulting hashed values.
+ @param hashType type of the hashing function (see {@link Hash}).]]>
+      </doc>
+    </constructor>
+    <method name="clear"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Clears <i>this</i> hash function. A NOOP]]>
+      </doc>
+    </method>
+    <method name="hash" return="int[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="k" type="org.apache.hadoop.util.bloom.Key"/>
+      <doc>
+      <![CDATA[Hashes a specified key into several integers.
+ @param k The specified key.
+ @return The array of hashed values.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Implements a hash object that returns a certain number of hashed values.
+ 
+ @see Key The general behavior of a key being stored in a filter
+ @see Filter The general behavior of a filter]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.bloom.HashFunction -->
+  <!-- start interface org.apache.hadoop.util.bloom.RemoveScheme -->
+  <interface name="RemoveScheme"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <field name="RANDOM" type="short"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Random selection.
+ <p>
+ The idea is to randomly select a bit to reset.]]>
+      </doc>
+    </field>
+    <field name="MINIMUM_FN" type="short"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[MinimumFN Selection.
+ <p>
+ The idea is to select the bit to reset that will generate the minimum
+ number of false negative.]]>
+      </doc>
+    </field>
+    <field name="MAXIMUM_FP" type="short"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[MaximumFP Selection.
+ <p>
+ The idea is to select the bit to reset that will remove the maximum number
+ of false positive.]]>
+      </doc>
+    </field>
+    <field name="RATIO" type="short"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Ratio Selection.
+ <p>
+ The idea is to select the bit to reset that will, at the same time, remove
 + the maximum number of false positives while minimizing the amount of false
+ negative generated.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Defines the different remove scheme for retouched Bloom filters.
+ <p>
+ Originally created by
+ <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.util.bloom.RemoveScheme -->
+  <!-- start class org.apache.hadoop.util.bloom.RetouchedBloomFilter -->
+  <class name="RetouchedBloomFilter" extends="org.apache.hadoop.util.bloom.BloomFilter"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.util.bloom.RemoveScheme"/>
+    <constructor name="RetouchedBloomFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor - use with readFields]]>
+      </doc>
+    </constructor>
+    <constructor name="RetouchedBloomFilter" type="int, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor
+ @param vectorSize The vector size of <i>this</i> filter.
+ @param nbHash The number of hash function to consider.
+ @param hashType type of the hashing function (see
+ {@link org.apache.hadoop.util.hash.Hash}).]]>
+      </doc>
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+    </method>
+    <method name="addFalsePositive"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.util.bloom.Key"/>
+      <doc>
+      <![CDATA[Adds a false positive information to <i>this</i> retouched Bloom filter.
+ <p>
+ <b>Invariant</b>: if the false positive is <code>null</code>, nothing happens.
+ @param key The false positive key to add.]]>
+      </doc>
+    </method>
+    <method name="addFalsePositive"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="coll" type="java.util.Collection"/>
+      <doc>
+      <![CDATA[Adds a collection of false positive information to <i>this</i> retouched Bloom filter.
+ @param coll The collection of false positive.]]>
+      </doc>
+    </method>
+    <method name="addFalsePositive"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="keys" type="java.util.List"/>
+      <doc>
+      <![CDATA[Adds a list of false positive information to <i>this</i> retouched Bloom filter.
+ @param keys The list of false positive.]]>
+      </doc>
+    </method>
+    <method name="addFalsePositive"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="keys" type="org.apache.hadoop.util.bloom.Key[]"/>
+      <doc>
+      <![CDATA[Adds an array of false positive information to <i>this</i> retouched Bloom filter.
+ @param keys The array of false positive.]]>
+      </doc>
+    </method>
+    <method name="selectiveClearing"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="k" type="org.apache.hadoop.util.bloom.Key"/>
+      <param name="scheme" type="short"/>
+      <doc>
+      <![CDATA[Performs the selective clearing for a given key.
+ @param k The false positive key to remove from <i>this</i> retouched Bloom filter.
+ @param scheme The selective clearing scheme to apply.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Implements a <i>retouched Bloom filter</i>, as defined in the CoNEXT 2006 paper.
+ <p>
+ It allows the removal of selected false positives at the cost of introducing
+ random false negatives, and with the benefit of eliminating some random false
+ positives at the same time.
+ 
+ <p>
+ Originally created by
+ <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
+ 
+ @see Filter The general behavior of a filter
+ @see BloomFilter A Bloom filter
+ @see RemoveScheme The different selective clearing algorithms
+ 
+ @see <a href="http://www-rp.lip6.fr/site_npa/site_rp/_publications/740-rbf_cameraready.pdf">Retouched Bloom Filters: Allowing Networked Applications to Trade Off Selected False Positives Against False Negatives</a>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.bloom.RetouchedBloomFilter -->
+</package>
+<package name="org.apache.hadoop.util.curator">
+</package>
+<package name="org.apache.hadoop.util.functional">
+  <!-- start class org.apache.hadoop.util.functional.FutureIO -->
+  <class name="FutureIO" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="awaitFuture" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="future" type="java.util.concurrent.Future"/>
+      <exception name="InterruptedIOException" type="java.io.InterruptedIOException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="RuntimeException" type="java.lang.RuntimeException"/>
+      <doc>
+      <![CDATA[Given a future, evaluate it.
+ <p>
+ Any exception generated in the future is
+ extracted and rethrown.
+ </p>
+ @param future future to evaluate
+ @param <T> type of the result.
+ @return the result, if all went well.
+ @throws InterruptedIOException future was interrupted
+ @throws IOException if something went wrong
+ @throws RuntimeException any nested RTE thrown]]>
+      </doc>
+    </method>
+    <method name="awaitFuture" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="future" type="java.util.concurrent.Future"/>
+      <param name="timeout" type="long"/>
+      <param name="unit" type="java.util.concurrent.TimeUnit"/>
+      <exception name="InterruptedIOException" type="java.io.InterruptedIOException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="RuntimeException" type="java.lang.RuntimeException"/>
+      <exception name="TimeoutException" type="java.util.concurrent.TimeoutException"/>
+      <doc>
+      <![CDATA[Given a future, evaluate it.
+ <p>
+ Any exception generated in the future is
+ extracted and rethrown.
+ </p>
+ @param future future to evaluate
+ @param <T> type of the result.
+ @return the result, if all went well.
+ @throws InterruptedIOException future was interrupted
+ @throws IOException if something went wrong
+ @throws RuntimeException any nested RTE thrown
+ @throws TimeoutException the future timed out.]]>
+      </doc>
+    </method>
+    <method name="raiseInnerCause" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="e" type="java.util.concurrent.ExecutionException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[From the inner cause of an execution exception, extract the inner cause
+ if it is an IOE or RTE.
+ This will always raise an exception, either the inner IOException,
+ an inner RuntimeException, or a new IOException wrapping the raised
+ exception.
+
+ @param e exception.
+ @param <T> type of return value.
+ @return nothing, ever.
+ @throws IOException either the inner IOException, or a wrapper around
+ any non-Runtime-Exception
+ @throws RuntimeException if that is the inner cause.]]>
+      </doc>
+    </method>
+    <method name="raiseInnerCause" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="e" type="java.util.concurrent.CompletionException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Extract the cause of a completion failure and rethrow it if an IOE
+ or RTE.
+ @param e exception.
+ @param <T> type of return value.
+ @return nothing, ever.
+ @throws IOException either the inner IOException, or a wrapper around
+ any non-Runtime-Exception
+ @throws RuntimeException if that is the inner cause.]]>
+      </doc>
+    </method>
+    <method name="unwrapInnerException" return="java.io.IOException"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="e" type="java.lang.Throwable"/>
+      <doc>
+      <![CDATA[From the inner cause of an execution exception, extract the inner cause
+ to an IOException, raising RuntimeExceptions and Errors immediately.
+ <ol>
+   <li> If it is an IOE: Return.</li>
+   <li> If it is a {@link UncheckedIOException}: return the cause</li>
+   <li> Completion/Execution Exceptions: extract and repeat</li>
+   <li> If it is an RTE or Error: throw.</li>
+   <li> Any other type: wrap in an IOE</li>
+ </ol>
+
+ Recursively handles wrapped Execution and Completion Exceptions in
+ case something very complicated has happened.
+ @param e exception.
+ @return an IOException extracted or built from the cause.
+ @throws RuntimeException if that is the inner cause.
+ @throws Error if that is the inner cause.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Future IO Helper methods.
+ <p>
+ Contains methods promoted from
+ {@link org.apache.hadoop.fs.impl.FutureIOSupport} because they
+ are a key part of integrating async IO in application code.
+ </p>
+ <p>
+ One key feature is that the {@link #awaitFuture(Future)} and
+ {@link #awaitFuture(Future, long, TimeUnit)} calls will
+ extract and rethrow exceptions raised in the future's execution,
+ including extracting the inner IOException of any
+ {@code UncheckedIOException} raised in the future.
+ This makes it somewhat easier to execute IOException-raising
+ code inside futures.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.functional.FutureIO -->
+  <!-- start class org.apache.hadoop.util.functional.RemoteIterators -->
+  <class name="RemoteIterators" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="remoteIteratorFromSingleton" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="singleton" type="T"/>
+      <doc>
+      <![CDATA[Create an iterator from a singleton.
+ @param singleton instance
+ @param <T> type
+ @return a remote iterator]]>
+      </doc>
+    </method>
+    <method name="remoteIteratorFromIterator" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="iterator" type="java.util.Iterator"/>
+      <doc>
+      <![CDATA[Create a remote iterator from a java.util.Iterator.
+ @param <T> type
+ @return a remote iterator]]>
+      </doc>
+    </method>
+    <method name="remoteIteratorFromIterable" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="iterable" type="java.lang.Iterable"/>
+      <doc>
+      <![CDATA[Create a remote iterator from a java.util.Iterable -e.g. a list
+ or other collection.
+ @param <T> type
+ @return a remote iterator]]>
+      </doc>
+    </method>
+    <method name="remoteIteratorFromArray" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="array" type="T[]"/>
+      <doc>
+      <![CDATA[Create a remote iterator from an array.
+ @param <T> type
+ @return a remote iterator]]>
+      </doc>
+    </method>
+    <method name="mappingRemoteIterator" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="iterator" type="org.apache.hadoop.fs.RemoteIterator"/>
+      <param name="mapper" type="org.apache.hadoop.util.functional.FunctionRaisingIOE"/>
+      <doc>
+      <![CDATA[Create an iterator from an iterator and a transformation function.
+ @param <S> source type
+ @param <T> result type
+ @param iterator source
+ @param mapper transformation
+ @return a remote iterator]]>
+      </doc>
+    </method>
+    <method name="typeCastingRemoteIterator" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="iterator" type="org.apache.hadoop.fs.RemoteIterator"/>
+      <doc>
+      <![CDATA[Create a RemoteIterator from a RemoteIterator, casting the
+ type in the process. This is to help with filesystem API
+ calls where overloading causes confusion (e.g. listStatusIterator())
+ @param <S> source type
+ @param <T> result type
+ @param iterator source
+ @return a remote iterator]]>
+      </doc>
+    </method>
+    <method name="filteringRemoteIterator" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="iterator" type="org.apache.hadoop.fs.RemoteIterator"/>
+      <param name="filter" type="org.apache.hadoop.util.functional.FunctionRaisingIOE"/>
+      <doc>
+      <![CDATA[Create a RemoteIterator from a RemoteIterator and a filter
+ function which returns true for every element to be passed
+ through.
+ <p></p>
+ Elements are filtered in the hasNext() method; if not used
+ the filtering will be done on demand in the {@code next()}
+ call.
+ @param <S> type
+ @param iterator source
+ @param filter filter
+ @return a remote iterator]]>
+      </doc>
+    </method>
+    <method name="closingRemoteIterator" return="org.apache.hadoop.fs.RemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="iterator" type="org.apache.hadoop.fs.RemoteIterator"/>
+      <param name="toClose" type="java.io.Closeable"/>
+      <doc>
+      <![CDATA[This adds an extra close operation alongside the passthrough
+ to any Closeable.close() method supported by the source iterator.
+ @param iterator source
+ @param toClose extra object to close.
+ @param <S> source type.
+ @return a new iterator]]>
+      </doc>
+    </method>
+    <method name="toList" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="source" type="org.apache.hadoop.fs.RemoteIterator"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Build a list from a RemoteIterator.
+ @param source source iterator
+ @param <T> type
+ @return a list of the values.
+ @throws IOException if the source RemoteIterator raises it.]]>
+      </doc>
+    </method>
+    <method name="toArray" return="T[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="source" type="org.apache.hadoop.fs.RemoteIterator"/>
+      <param name="a" type="T[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Build an array from a RemoteIterator.
+ @param source source iterator
+ @param a destination array; if too small a new array
+ of the same type is created
+ @param <T> type
+ @return an array of the values.
+ @throws IOException if the source RemoteIterator raises it.]]>
+      </doc>
+    </method>
+    <method name="foreach" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="source" type="org.apache.hadoop.fs.RemoteIterator"/>
+      <param name="consumer" type="org.apache.hadoop.util.functional.ConsumerRaisingIOE"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Apply an operation to all values of a RemoteIterator.
+ <p></p>
+ If the iterator is an IOStatisticsSource returning a non-null
+ set of statistics, <i>and</i> this classes log is set to DEBUG,
+ then the statistics of the operation are evaluated and logged at
+ debug.
+ <p></p>
+ The number of entries processed is returned, as it is useful to
+ know this, especially during tests or when reporting values
+ to users.
+ <p></p>
+ This does not close the iterator afterwards.
+ @param source iterator source
+ @param consumer consumer of the values.
+ @return the number of elements processed
+ @param <T> type of source
+ @throws IOException if the source RemoteIterator or the consumer raise one.]]>
+      </doc>
+    </method>
+    <method name="cleanupRemoteIterator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="source" type="org.apache.hadoop.fs.RemoteIterator"/>
+      <doc>
+      <![CDATA[Clean up after an iteration.
+ If the log is at debug, calculate and log the IOStatistics.
+ If the iterator is closeable, cast and then cleanup the iterator
+ @param source iterator source
+ @param <T> type of source]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A set of remote iterators supporting transformation and filtering,
+ with IOStatisticsSource passthrough, and of conversions of
+ the iterators to lists/arrays and of performing actions
+ on the values.
+ <p></p>
+ This aims to make it straightforward to use lambda-expressions to
+ transform the results of an iterator, without losing the statistics
+ in the process, and to chain the operations together.
+ <p></p>
+ The closeable operation will be passed through RemoteIterators which
+ wrap other RemoteIterators. This is to support any iterator which
+ can be closed to release held connections, file handles etc.
+ Unless client code is written to assume that RemoteIterator instances
+ may be closed, this is not likely to be broadly used. It is added
+ to make it possible to adopt this feature in a managed way.
+ <p></p>
+ One notable feature is that the
+ {@link #foreach(RemoteIterator, ConsumerRaisingIOE)} method will
+ LOG at debug any IOStatistics provided by the iterator, if such
+ statistics are provided. There's no attempt at retrieval and logging
+ if the LOG is not set to debug, so it is a zero cost feature unless
+ the logger {@code org.apache.hadoop.fs.functional.RemoteIterators}
+ is at DEBUG.
+ <p></p>
 + Based on the S3A Listing code, and some work on moving other code
+ to using iterative listings so as to pick up the statistics.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.util.functional.RemoteIterators -->
+</package>
+<package name="org.apache.hadoop.util.hash">
+</package>
+
+</api>
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.3.4/CHANGELOG.3.3.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.3.4/CHANGELOG.3.3.4.md
new file mode 100644
index 0000000..78b8052
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.3.4/CHANGELOG.3.3.4.md
@@ -0,0 +1,56 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop Changelog
+
+## Release 3.3.4 - 2022-07-29
+
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-18044](https://issues.apache.org/jira/browse/HADOOP-18044) | Hadoop - Upgrade to JQuery 3.6.0 |  Major | . | Yuan Luo | Yuan Luo |
+| [YARN-11195](https://issues.apache.org/jira/browse/YARN-11195) | Document how to configure NUMA in YARN |  Major | documentation | Prabhu Joseph | Samrat Deb |
+| [HADOOP-18332](https://issues.apache.org/jira/browse/HADOOP-18332) | Remove rs-api dependency by downgrading jackson to 2.12.7 |  Major | build | PJ Fanning | PJ Fanning |
+| [HADOOP-18354](https://issues.apache.org/jira/browse/HADOOP-18354) | Upgrade reload4j to 1.2.22 due to XXE vulnerability |  Major | . | PJ Fanning | PJ Fanning |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-18085](https://issues.apache.org/jira/browse/HADOOP-18085) | S3 SDK Upgrade causes AccessPoint ARN endpoint mistranslation |  Major | fs/s3, test | Bogdan Stolojan | Bogdan Stolojan |
+| [YARN-11092](https://issues.apache.org/jira/browse/YARN-11092) | Upgrade jquery ui to 1.13.1 |  Major | . | D M Murali Krishna Reddy | groot |
+| [HDFS-16453](https://issues.apache.org/jira/browse/HDFS-16453) | Upgrade okhttp from 2.7.5 to 4.9.3 |  Major | hdfs-client | Ivan Viaznikov | groot |
+| [YARN-10974](https://issues.apache.org/jira/browse/YARN-10974) | CS UI: queue filter and openQueues param do not work as expected |  Major | capacity scheduler | Chengbing Liu | Chengbing Liu |
+| [HADOOP-18237](https://issues.apache.org/jira/browse/HADOOP-18237) | Upgrade Apache Xerces Java to 2.12.2 |  Major | build | groot | groot |
+| [HADOOP-18074](https://issues.apache.org/jira/browse/HADOOP-18074) | Partial/Incomplete groups list can be returned in LDAP groups lookup |  Major | security | Philippe Lanoe | Larry McCay |
+| [HADOOP-18079](https://issues.apache.org/jira/browse/HADOOP-18079) | Upgrade Netty to 4.1.77.Final |  Major | build | Renukaprasad C | Wei-Chiu Chuang |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-18068](https://issues.apache.org/jira/browse/HADOOP-18068) | Upgrade AWS SDK to 1.12.132 |  Major | build, fs/s3 | Steve Loughran | Steve Loughran |
+| [HADOOP-18307](https://issues.apache.org/jira/browse/HADOOP-18307) | remove hadoop-cos as a dependency of hadoop-cloud-storage |  Major | build, fs | Steve Loughran | Steve Loughran |
+| [HADOOP-18344](https://issues.apache.org/jira/browse/HADOOP-18344) | AWS SDK update to 1.12.262 to address jackson  CVE-2018-7489 |  Major | fs/s3 | Steve Loughran | Steve Loughran |
+
+
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.3.4/RELEASENOTES.3.3.4.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.3.4/RELEASENOTES.3.3.4.md
new file mode 100644
index 0000000..7957388
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.3.4/RELEASENOTES.3.3.4.md
@@ -0,0 +1,66 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# Apache Hadoop  3.3.4 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+
+
+---
+
+* [HDFS-16453](https://issues.apache.org/jira/browse/HDFS-16453) | *Major* | **Upgrade okhttp from 2.7.5 to 4.9.3**
+
+okhttp has been updated to address CVE-2021-0341
+
+
+---
+
+* [HADOOP-18237](https://issues.apache.org/jira/browse/HADOOP-18237) | *Major* | **Upgrade Apache Xerces Java to 2.12.2**
+
+Apache Xerces has been updated to 2.12.2 to fix CVE-2022-23437
+
+
+---
+
+* [HADOOP-18307](https://issues.apache.org/jira/browse/HADOOP-18307) | *Major* | **remove hadoop-cos as a dependency of hadoop-cloud-storage**
+
+We have recently become aware that libraries which include a shaded apache httpclient libraries (hadoop-client-runtime.jar, aws-java-sdk-bundle.jar, gcs-connector-shaded.jar, cos\_api-bundle-5.6.19.jar) all load and use the unshaded resource mozilla/public-suffix-list.txt. If an out of date version of this is found on the classpath first, attempts to negotiate TLS connections may fail with the error "Certificate doesn't match any of the subject alternative names". This release does not declare the hadoop-cos library to be a dependency of the hadoop-cloud-storage POM, so applications depending on that module are no longer exposed to this issue. If an application requires use of the hadoop-cos module, please declare an explicit dependency.
+
+
+---
+
+* [HADOOP-18332](https://issues.apache.org/jira/browse/HADOOP-18332) | *Major* | **Remove rs-api dependency by downgrading jackson to 2.12.7**
+
+Downgrades Jackson from 2.13.2 to 2.12.7 to fix class conflicts in downstream projects. This version of jackson does contain the fix for CVE-2020-36518.
+
+
+---
+
+* [HADOOP-18079](https://issues.apache.org/jira/browse/HADOOP-18079) | *Major* | **Upgrade Netty to 4.1.77.Final**
+
+Netty has been updated to address CVE-2019-20444, CVE-2019-20445 and CVE-2022-24823
+
+
+---
+
+* [HADOOP-18344](https://issues.apache.org/jira/browse/HADOOP-18344) | *Major* | **AWS SDK update to 1.12.262 to address jackson  CVE-2018-7489**
+
+The AWS SDK has been updated to 1.12.262 to address jackson CVE-2018-7489
+
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.3.4.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.3.4.xml
new file mode 100644
index 0000000..b3978b0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.3.4.xml
@@ -0,0 +1,835 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Fri Jul 29 14:04:11 GMT 2022 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop HDFS 3.3.4"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.3.4.jar:/maven/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar:/maven/org/apache/httpcomponents/httpclient/4.5.13/httpclient-4.5.13.jar:/maven/org/apache/httpcomponents/httpcore/4.4.13/httpcore-4.4.13.jar:/maven/com/nimbusds/nimbus-jose-jwt/9.8.1/nimbus-jose-jwt-9.8.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/2.4.7/json-smart-2.4.7.jar:/maven/net/minidev/accessors-smart/2.4.7/accessors-smart-2.4.7.jar:/maven/org/ow2/asm/asm/5.0.4/asm-5.0.4.jar:/maven/org/apache/zookeeper/zookeeper/3.5.6/zookeeper-3.5.6.jar:/maven/org/apache/curator/curator-framework/4.2.0/curator-framework-4.2.0.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.1/kerb-simplekdc-1.0.1.jar:/maven/org/apache/kerby/kerb-client/1.0.1/kerb-client-1.0.1.jar:/maven/org/apache/kerby/kerby-config/1.0.1/kerby-config-1.0.1.jar:/maven/org/apache/kerby/kerb-common/1.0.1/kerb-common-1.0.1.jar:/maven/org/apache/kerby/kerb-crypto/1.0.1/kerb-crypto-1.0.1.jar:/maven/org/apache/kerby/kerb-util/1.0.1/kerb-util-1.0.1.jar:/maven/org/apache/kerby/token-provider/1.0.1/token-provider-1.0.1.jar:/maven/org/apache/kerby/kerb-admin/1.0.1/kerb-admin-1.0.1.jar:/maven/org/apache/kerby/kerb-server/1.0.1/kerb-server-1.0.1.jar:/maven/org/apache/kerby/kerb-identity/1.0.1/kerb-identity-1.0.1.jar:/maven/org/apache/kerby/kerby-xdr/1.0.1/kerby-xdr-1.0.1.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.3.4.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-protobuf_3_7/1.1.1/hadoop-shaded-protobuf_3_7-1.1.1.jar:/maven/com/google/gu
ava/guava/27.0-jre/guava-27.0-jre.jar:/maven/com/google/guava/failureaccess/1.0/failureaccess-1.0.jar:/maven/com/google/guava/listenablefuture/9999.0-empty-to-avoid-conflict-with-guava/listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar:/maven/org/checkerframework/checker-qual/2.5.2/checker-qual-2.5.2.jar:/maven/com/google/j2objc/j2objc-annotations/1.1/j2objc-annotations-1.1.jar:/maven/org/codehaus/mojo/animal-sniffer-annotations/1.17/animal-sniffer-annotations-1.17.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.6/commons-net-3.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/jakarta/activation/jakarta.activation-api/1.2.1/jakarta.activation-api-1.2.1.jar:/maven/org/eclipse/jetty/jetty-servlet/9.4.43.v20210629/jetty-servlet-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-security/9.4.43.v20210629/jetty-security-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-webapp/9.4.43.v20210629/jetty-webapp-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-xml/9.4.43.v20210629/jetty-xml-9.4.43.v20210629.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/commons-beanutils/commons-beanutils/1.9.4/commons-beanutils-1.9.4.jar:/maven/org/apache/commons/commons-configuration2/2.1.1/commons-configuration2-2.1.1.jar:/maven/org/apache/commons/commo
ns-lang3/3.12.0/commons-lang3-3.12.0.jar:/maven/org/apache/commons/commons-text/1.4/commons-text-1.4.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/com/google/re2j/re2j/1.1/re2j-1.1.jar:/maven/com/google/code/gson/gson/2.8.9/gson-2.8.9.jar:/maven/com/jcraft/jsch/0.1.55/jsch-0.1.55.jar:/maven/org/apache/curator/curator-client/4.2.0/curator-client-4.2.0.jar:/maven/org/apache/curator/curator-recipes/4.2.0/curator-recipes-4.2.0.jar:/maven/com/google/code/findbugs/jsr305/3.0.2/jsr305-3.0.2.jar:/maven/org/apache/commons/commons-compress/1.21/commons-compress-1.21.jar:/maven/org/apache/kerby/kerb-core/1.0.1/kerb-core-1.0.1.jar:/maven/org/apache/kerby/kerby-pkix/1.0.1/kerby-pkix-1.0.1.jar:/maven/org/apache/kerby/kerby-asn1/1.0.1/kerby-asn1-1.0.1.jar:/maven/org/apache/kerby/kerby-util/1.0.1/kerby-util-1.0.1.jar:/maven/org/codehaus/woodstox/stax2-api/4.2.1/stax2-api-4.2.1.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.3.0/woodstox-core-5.3.0.jar:/maven/dnsjava/dnsjava/2.1.7/dnsjava-2.1.7.jar:/maven/org/xerial/snappy/snappy-java/1.1.8.2/snappy-java-1.1.8.2.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-3.3.4.jar:/maven/com/squareup/okhttp3/okhttp/4.9.3/okhttp-4.9.3.jar:/maven/com/squareup/okio/okio/2.8.0/okio-2.8.0.jar:/maven/org/jetbrains/kotlin/kotlin-stdlib/1.4.10/kotlin-stdlib-1.4.10.jar:/maven/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.10/kotlin-stdlib-common-1.4.10.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.12.7/jackson-annotations-2.12.7.jar:/maven/org/apache/zookeeper/zookeeper-jute/3.5.6/zookeeper-jute-3.5.6.jar:/maven/org/apache/yetus/audience-annotations/0.5.0/audience-annotations-0.5.0.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-guava/1.1.1/hadoop-shaded-guava-1.1.1.jar:/maven/org/eclipse/jetty/jetty-server/9.4.43.v20210629/jetty-server-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-http/9.4.43.v20210629/jetty-http
-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-io/9.4.43.v20210629/jetty-io-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-util/9.4.43.v20210629/jetty-util-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-util-ajax/9.4.43.v20210629/jetty-util-ajax-9.4.43.v20210629.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.15/commons-codec-1.15.jar:/maven/commons-io/commons-io/2.8.0/commons-io-2.8.0.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/ch/qos/reload4j/reload4j/1.2.22/reload4j-1.2.22.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/com/google/code/findbugs/findbugs/3.0.1/findbugs-3.0.1.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/com/google/code/findbugs/bcel-findbugs/6.0/bcel-findbugs-6.0.jar:/maven/com/google/code/findbugs/jFormatString/2.0.1/jFormatString-2.0.1.jar:/maven/dom4j/dom4j/1.6.1/dom4j-1.6.1.jar:/maven/org/ow2/asm/asm-debug-all/5.0.2/asm-debug-all-5.0.2.jar:/maven/org/ow2/asm/asm-commons/5.0.2/asm-commons-5.0.2.jar:/maven/org/ow2/asm/asm-tree/5.0.2/asm-tree-5.0.2.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/com/apple/AppleJavaExtensions/1.4/AppleJavaExtensions-1.4.jar:/maven/jaxen/jaxen/1.1.6/jaxen-1.1.6.jar:/maven/org/slf4j/slf4j-reload4j/1.7.36/slf4j-reload4j-1.7.36.jar:/maven/io/netty/netty/3.10.6.Final/netty-3.10.6.Final.jar:/maven/io/netty/netty-all/4.1.77.Final/netty-all-4.1.77.Final.jar:/maven/io/netty/netty-buffer/4.1.77.Final/netty-buffer-4.1.77.Final.jar:/maven/io/netty/netty-codec/4.1.77.Final/netty-codec-4.1.77.Final.jar:/maven/io/netty/netty-codec
-dns/4.1.77.Final/netty-codec-dns-4.1.77.Final.jar:/maven/io/netty/netty-codec-haproxy/4.1.77.Final/netty-codec-haproxy-4.1.77.Final.jar:/maven/io/netty/netty-codec-http/4.1.77.Final/netty-codec-http-4.1.77.Final.jar:/maven/io/netty/netty-codec-http2/4.1.77.Final/netty-codec-http2-4.1.77.Final.jar:/maven/io/netty/netty-codec-memcache/4.1.77.Final/netty-codec-memcache-4.1.77.Final.jar:/maven/io/netty/netty-codec-mqtt/4.1.77.Final/netty-codec-mqtt-4.1.77.Final.jar:/maven/io/netty/netty-codec-redis/4.1.77.Final/netty-codec-redis-4.1.77.Final.jar:/maven/io/netty/netty-codec-smtp/4.1.77.Final/netty-codec-smtp-4.1.77.Final.jar:/maven/io/netty/netty-codec-socks/4.1.77.Final/netty-codec-socks-4.1.77.Final.jar:/maven/io/netty/netty-codec-stomp/4.1.77.Final/netty-codec-stomp-4.1.77.Final.jar:/maven/io/netty/netty-codec-xml/4.1.77.Final/netty-codec-xml-4.1.77.Final.jar:/maven/io/netty/netty-common/4.1.77.Final/netty-common-4.1.77.Final.jar:/maven/io/netty/netty-handler/4.1.77.Final/netty-handler-4.1.77.Final.jar:/maven/io/netty/netty-handler-proxy/4.1.77.Final/netty-handler-proxy-4.1.77.Final.jar:/maven/io/netty/netty-resolver/4.1.77.Final/netty-resolver-4.1.77.Final.jar:/maven/io/netty/netty-resolver-dns/4.1.77.Final/netty-resolver-dns-4.1.77.Final.jar:/maven/io/netty/netty-transport/4.1.77.Final/netty-transport-4.1.77.Final.jar:/maven/io/netty/netty-transport-rxtx/4.1.77.Final/netty-transport-rxtx-4.1.77.Final.jar:/maven/io/netty/netty-transport-sctp/4.1.77.Final/netty-transport-sctp-4.1.77.Final.jar:/maven/io/netty/netty-transport-udt/4.1.77.Final/netty-transport-udt-4.1.77.Final.jar:/maven/io/netty/netty-transport-classes-epoll/4.1.77.Final/netty-transport-classes-epoll-4.1.77.Final.jar:/maven/io/netty/netty-transport-native-unix-common/4.1.77.Final/netty-transport-native-unix-common-4.1.77.Final.jar:/maven/io/netty/netty-transport-classes-kqueue/4.1.77.Final/netty-transport-classes-kqueue-4.1.77.Final.jar:/maven/io/netty/netty-resolver-dns-classes-macos/4.1.77.Final/netty
-resolver-dns-classes-macos-4.1.77.Final.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.12.7/jackson-databind-2.12.7.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.12.7/jackson-core-2.12.7.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.3.4.jar:/usr/lib/jvm/java-8-openjdk-amd64/lib/tools.jar:/maven/xerces/xercesImpl/2.12.2/xercesImpl-2.12.2.jar:/maven/xml-apis/xml-apis/1.4.01/xml-apis-1.4.01.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml -apiname Apache Hadoop HDFS 3.3.4 -->
+<package name="org.apache.hadoop.hdfs">
+  <doc>
+  <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files 
+have strictly one writer at any one time.  Bytes are always appended 
+to the end of the writer's stream.  There is no notion of "record appends"
+or "mutations" that are then checked or reordered.  Writers simply emit 
+a byte stream.  That byte stream is guaranteed to be stored in the 
+order written.</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.net">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer">
+</package>
+<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl">
+</package>
+<package name="org.apache.hadoop.hdfs.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.client">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocol">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.protocolPB">
+</package>
+<package name="org.apache.hadoop.hdfs.qjournal.server">
+  <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+  <interface name="JournalNodeMXBean"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getJournalsStatus" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals.
+ 
+ @return A string presenting status for each journal]]>
+      </doc>
+    </method>
+    <method name="getHostAndPort" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get host and port of JournalNode.
+
+ @return colon separated host and port.]]>
+      </doc>
+    </method>
+    <method name="getClusterIds" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get list of the clusters of JournalNode's journals
+ as one JournalNode may support multiple clusters.
+
+ @return list of clusters.]]>
+      </doc>
+    </method>
+    <method name="getVersion" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gets the version of Hadoop.
+
+ @return the version of Hadoop.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the JMX management interface for JournalNode information]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean -->
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.block">
+</package>
+<package name="org.apache.hadoop.hdfs.security.token.delegation">
+</package>
+<package name="org.apache.hadoop.hdfs.server.aliasmap">
+  <!-- start class org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap -->
+  <class name="InMemoryAliasMap" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol"/>
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="init" return="org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="list" return="org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMapProtocol.IterationResult"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="marker" type="java.util.Optional"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="read" return="java.util.Optional"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+      <param name="providedStorageLocation" type="org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getBlockPoolId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="fromProvidedStorageLocationBytes" return="org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="providedStorageLocationDbFormat" type="byte[]"/>
+      <exception name="InvalidProtocolBufferException" type="org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException"/>
+    </method>
+    <method name="fromBlockBytes" return="org.apache.hadoop.hdfs.protocol.Block"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blockDbFormat" type="byte[]"/>
+      <exception name="InvalidProtocolBufferException" type="org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException"/>
+    </method>
+    <method name="toProtoBufBytes" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="providedStorageLocation" type="org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="toProtoBufBytes" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="transferForBootstrap"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="response" type="javax.servlet.http.HttpServletResponse"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="aliasMap" type="org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Transfer this aliasmap for bootstrapping standby Namenodes. The map is
+ transferred as a tar.gz archive. This archive needs to be extracted on the
+ standby Namenode.
+
+ @param response http response.
+ @param conf configuration to use.
+ @param aliasMap aliasmap to transfer.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="completeBootstrapTransfer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="aliasMap" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Extract the aliasmap archive to complete the bootstrap process. This method
+ has to be called after the aliasmap archive is transferred from the primary
+ Namenode.
+
+ @param aliasMap location of the aliasmap.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[InMemoryAliasMap is an implementation of the InMemoryAliasMapProtocol for
+ use with LevelDB.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.balancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.blockmanagement">
+</package>
+<package name="org.apache.hadoop.hdfs.server.common">
+  <!-- start interface org.apache.hadoop.hdfs.server.common.BlockAlias -->
+  <interface name="BlockAlias"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Interface used to load provided blocks.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.server.common.BlockAlias -->
+  <!-- start class org.apache.hadoop.hdfs.server.common.FileRegion -->
+  <class name="FileRegion" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.common.BlockAlias"/>
+    <constructor name="FileRegion" type="long, org.apache.hadoop.fs.Path, long, long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileRegion" type="long, org.apache.hadoop.fs.Path, long, long, long, byte[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileRegion" type="long, org.apache.hadoop.fs.Path, long, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileRegion" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProvidedStorageLocation" return="org.apache.hadoop.hdfs.protocol.ProvidedStorageLocation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[This class is used to represent provided blocks that are file regions,
+ i.e., can be described using (path, offset, length).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.common.FileRegion -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.common.blockaliasmap">
+  <!-- start class org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap -->
+  <class name="BlockAliasMap" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BlockAliasMap"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getReader" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns a reader to the alias map.
+ @param opts reader options
+ @param blockPoolID block pool id to use
+ @return {@link Reader} to the alias map. If a Reader for the blockPoolID
+ cannot be created, this will return null.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getWriter" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the writer for the alias map.
+ @param opts writer options.
+ @param blockPoolID block pool id to use
+ @return {@link Writer} to the alias map.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="refresh"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Refresh the alias map.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[An abstract class used to read and write block maps for provided blocks.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.common.blockaliasmap.impl">
+  <!-- start class org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.LevelDBFileRegionAliasMap -->
+  <class name="LevelDBFileRegionAliasMap" extends="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="LevelDBFileRegionAliasMap"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getReader" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getWriter" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="refresh"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A LevelDB based implementation of {@link BlockAliasMap}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.LevelDBFileRegionAliasMap -->
+  <!-- start class org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap -->
+  <class name="TextFileRegionAliasMap" extends="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="TextFileRegionAliasMap"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getReader" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Reader.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getWriter" return="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="opts" type="org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap.Writer.Options"/>
+      <param name="blockPoolID" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="refresh"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="blockPoolIDFromFileName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="fileNameFromBlockPoolID" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blockPoolID" type="java.lang.String"/>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class is used for block maps stored as text files,
+ with a specified delimiter.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web">
+</package>
+<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.command">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.connectors">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.datamodel">
+</package>
+<package name="org.apache.hadoop.hdfs.server.diskbalancer.planner">
+</package>
+<package name="org.apache.hadoop.hdfs.server.mover">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode">
+  <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <interface name="AuditLogger"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="initialize"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Called during initialization of the logger.
+
+ @param conf The configuration object.]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <doc>
+      <![CDATA[Called to log an audit event.
+ <p>
+ This method must return as quickly as possible, since it's called
+ in a critical section of the NameNode's operation.
+
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's
+             metadata (permissions, owner, times, etc).]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface defining an audit logger.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.DefaultAuditLogger -->
+  <class name="DefaultAuditLogger" extends="org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="DefaultAuditLogger"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="initialize"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="logAuditMessage"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="message" type="java.lang.String"/>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+    </method>
+    <field name="STRING_BUILDER" type="java.lang.ThreadLocal"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="isCallerContextEnabled" type="boolean"
+      transient="false" volatile="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="callerContextMaxLen" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The maximum bytes a caller context string can have.]]>
+      </doc>
+    </field>
+    <field name="callerSignatureMaxLen" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="logTokenTrackingId" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[adds a tracking ID for all audit log events.]]>
+      </doc>
+    </field>
+    <field name="debugCmdSet" type="java.util.Set"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[List of commands to provide debug messages.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[This class provides an interface for Namenode and Router to Audit events
+ information. This class can be extended and can be used when no access logger
+ is defined in the config file.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.DefaultAuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <class name="HdfsAuditLogger" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/>
+    <constructor name="HdfsAuditLogger"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="logAuditEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String, String,
+ FileStatus)} with additional parameters related to logging delegation token
+ tracking IDs.
+ 
+ @param succeeded Whether authorization succeeded.
+ @param userName Name of the user executing the request.
+ @param addr Remote address of the request.
+ @param cmd The requested command.
+ @param src Path of affected source file.
+ @param dst Path of affected destination file (if any).
+ @param stat File information for operations that change the file's metadata
+          (permissions, owner, times, etc).
+ @param callerContext Context information of the caller
+ @param ugi UserGroupInformation of the current user, or null if not logging
+          token tracking information
+ @param dtSecretManager The token secret manager, or null if not logging
+          token tracking information]]>
+      </doc>
+    </method>
+    <method name="logAuditEvent"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="succeeded" type="boolean"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetAddress"/>
+      <param name="cmd" type="java.lang.String"/>
+      <param name="src" type="java.lang.String"/>
+      <param name="dst" type="java.lang.String"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/>
+      <doc>
+      <![CDATA[Same as
+ {@link #logAuditEvent(boolean, String, InetAddress, String, String,
+ String, FileStatus, CallerContext, UserGroupInformation,
+ DelegationTokenSecretManager)} without {@link CallerContext} information.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Extension of {@link AuditLogger}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger -->
+  <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+  <class name="INodeAttributeProvider" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="INodeAttributeProvider"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="start"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initialize the provider. This method is called at NameNode startup
+ time.]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shutdown the provider. This method is called at NameNode shutdown time.]]>
+      </doc>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fullPath" type="java.lang.String"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathElements" type="java.lang.String[]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="components" type="byte[][]"/>
+      <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/>
+    </method>
+    <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/>
+      <doc>
+      <![CDATA[Can be over-ridden by implementations to provide a custom Access Control
+ Enforcer that can provide an alternate implementation of the
+ default permission checking logic.
+ @param defaultEnforcer The Default AccessControlEnforcer
+ @return The AccessControlEnforcer to use]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider -->
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.ha">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.top.window">
+</package>
+<package name="org.apache.hadoop.hdfs.server.namenode.web.resources">
+</package>
+<package name="org.apache.hadoop.hdfs.tools">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer">
+</package>
+<package name="org.apache.hadoop.hdfs.tools.snapshot">
+</package>
+<package name="org.apache.hadoop.hdfs.util">
+</package>
+<package name="org.apache.hadoop.hdfs.web">
+</package>
+<package name="org.apache.hadoop.hdfs.web.resources">
+</package>
+
+</api>
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_3.3.4.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_3.3.4.xml
new file mode 100644
index 0000000..b8737e9
--- /dev/null
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Common_3.3.4.xml
@@ -0,0 +1,113 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Fri Jul 29 14:15:00 GMT 2022 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop MapReduce Common 3.3.4"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/hadoop-annotations.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/jdiff.jar -verbose -classpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/classes:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-3.3.4.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-3.3.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.3.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/9.8.1/nimbus-jose-jwt-9.8.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/2.4.7/json-smart-2.4.7.jar:/maven/net/minidev/accessors-smart/2.4.7/accessors-smart-2.4.7.jar:/maven/org/ow2/asm/asm/5.0.4/asm-5.0.4.jar:/maven/org/apache/curator/curator-framework/4.2.0/curator-framework-4.2.0.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.1/kerb-simplekdc-1.0.1.jar:/maven/org/apache/kerby/kerb-client/1.0.1/kerb-client-1.0.1.jar:/maven/org/apache/kerby/kerby-config/1.0.1/kerby-config-1.0.1.jar:/maven/org/apache/kerby/kerb-common/1.0.1/kerb-common-1.0.1.jar:/maven/org/apache/kerby/kerb-crypto/1.0.1/kerb-crypto-1.0.1.jar:/maven/org/apache/kerby/kerb-util/1.0.1/kerb-util-1.0.1.jar:/maven/org/apache/kerby/token-provider/1.0.1/token-provider-1.0.1.jar:/maven/org/apache/kerby/kerb-admin/1.0.1/kerb-admin-1.0.1.jar:/maven/org/apache/kerby/kerb-server/1.0.1/kerb-server-1.0.1.jar:/maven/org/apache/kerby/kerb-identity/1.0.1/kerb-identity-1.0.1.jar:/maven/org/apache/kerby/kerby-xdr/1.0.1/kerby-xdr-1.0.1.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/org/apache/commons/commons-compress/1.21/commons-compress-1.21.
jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/org/eclipse/jetty/jetty-util/9.4.43.v20210629/jetty-util-9.4.43.v20210629.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-client/1.19/jersey-client-1.19.jar:/maven/commons-io/commons-io/2.8.0/commons-io-2.8.0.jar:/maven/com/google/inject/guice/4.0/guice-4.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.19/jersey-guice-1.19.jar:/maven/ch/qos/reload4j/reload4j/1.2.22/reload4j-1.2.22.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.12.7/jackson-core-2.12.7.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.12.7/jackson-databind-2.12.7.jar:/maven/com/fasterxml/jackson/module/jackson-module-jaxb-annotations/2.12.7/jackson-module-jaxb-annotations-2.12.7.jar:/maven/jakarta/xml/bind/jakarta.xml.bind-api/2.3.2/jakarta.xml.bind-api-2.3.2.jar:/maven/com/fasterxml/jackson/jaxrs/jackson-jaxrs-json-provider/2.12.7/jackson-jaxrs-json-provider-2.12.7.jar:/maven/com/fasterxml/jackson/jaxrs/jackson-jaxrs-base/2.12.7/jackson-jaxrs-base-2.12.7.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-yarn-client-3.3.4.jar:/maven/org/eclipse/jetty/websocket/websocket-client/9.4.43.v20210629/websocket-client-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-client/9.4.43.v20210629/jetty-client-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-io/9.4.43.v20210629/jetty-io-9.4
.43.v20210629.jar:/maven/org/eclipse/jetty/websocket/websocket-common/9.4.43.v20210629/websocket-common-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/websocket/websocket-api/9.4.43.v20210629/websocket-api-9.4.43.v20210629.jar:/maven/org/jline/jline/3.9.0/jline-3.9.0.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-mapreduce-client-core-3.3.4.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.1.8.2/snappy-java-1.1.8.2.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.3.4.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-protobuf_3_7/1.1.1/hadoop-shaded-protobuf_3_7-1.1.1.jar:/maven/com/google/guava/guava/27.0-jre/guava-27.0-jre.jar:/maven/com/google/guava/failureaccess/1.0/failureaccess-1.0.jar:/maven/com/google/guava/listenablefuture/9999.0-empty-to-avoid-conflict-with-guava/listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar:/maven/org/checkerframework/checker-qual/2.5.2/checker-qual-2.5.2.jar:/maven/com/google/j2objc/j2objc-annotations/1.1/j2objc-annotations-1.1.jar:/maven/org/codehaus/mojo/animal-sniffer-annotations/1.17/animal-sniffer-annotations-1.17.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/org/apache/httpcomponents/httpclient/4.5.13/httpclient-4.5.13.jar:/maven/org/apache/httpcomponents/httpcore/4.4.13/httpcore-4.4.13.jar:/maven/commons-net/commons-net/3.6/commons-net-3.6.jar:/maven/jakarta/activation/jakarta.activation-api/1.2.1/jakarta.activation-api-1.2.1.jar:/maven/org/eclipse/jetty/jetty-server/9.4.43.v20210629/jetty-server-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-http
/9.4.43.v20210629/jetty-http-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-servlet/9.4.43.v20210629/jetty-servlet-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-security/9.4.43.v20210629/jetty-security-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-webapp/9.4.43.v20210629/jetty-webapp-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-xml/9.4.43.v20210629/jetty-xml-9.4.43.v20210629.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/commons-beanutils/commons-beanutils/1.9.4/commons-beanutils-1.9.4.jar:/maven/org/apache/commons/commons-configuration2/2.1.1/commons-configuration2-2.1.1.jar:/maven/org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar:/maven/org/apache/commons/commons-text/1.4/commons-text-1.4.jar:/maven/com/google/re2j/re2j/1.1/re2j-1.1.jar:/maven/com/google/code/gson/gson/2.8.9/gson-2.8.9.jar:/maven/com/jcraft/jsch/0.1.55/jsch-0.1.55.jar:/maven/org/apache/curator/curator-client/4.2.0/curator-client-4.2.0.jar:/maven/org/apache/curator/curator-recipes/4.2.0/curator-recipes-4.2.0.jar:/maven/com/google/code/findbugs/jsr305/3.0.2/jsr305-3.0.2.jar:/maven/org/apache/zookeeper/zookeeper/3.5.6/zookeeper-3.5.6.jar:/maven/org/apache/zookeeper/zookeeper-jute/3.5.6/zookeeper-jute-3.5.6.jar:/maven/org/apache/yetus/audience-annotations/0.5.0/audience-annotations-0.5.0.jar:/maven/org/apache/kerby/kerb-core/1.0.1/kerb-core-1.0.1.jar:/maven/org/apache/kerby/kerby-pkix/1.0.1/kerby-pkix-1.0.1.jar:/maven/org/apache/kerby/kerby-asn1/1.0.1/kerby-asn1-1.0.1.jar:/maven/org/apache/kerby/kerby-util/1.0.1/kerby-util-1.0.1.jar:/maven/org/codehaus/woodstox/stax2-api/4.2.1/stax2-api-4.2.1.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.3.0/woodstox-core-5.3.0.jar:/maven/dnsjava/dnsjava/2.1.7/dnsjava-2.1.7.jar:/maven/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar:/maven/org/slf4j/slf4j-reload4j/1.7.36/slf4j-reload4j-1.7.36.jar:/build/source/hadoop-common-project/hadoop-annota
tions/target/hadoop-annotations-3.3.4.jar:/usr/lib/jvm/java-8-openjdk-amd64/lib/tools.jar:/maven/org/eclipse/jetty/jetty-util-ajax/9.4.43.v20210629/jetty-util-ajax-9.4.43.v20210629.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.12.7/jackson-annotations-2.12.7.jar:/maven/com/google/inject/extensions/guice-servlet/4.0/guice-servlet-4.0.jar:/maven/io/netty/netty/3.10.6.Final/netty-3.10.6.Final.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-guava/1.1.1/hadoop-shaded-guava-1.1.1.jar:/maven/commons-codec/commons-codec/1.15/commons-codec-1.15.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/xerces/xercesImpl/2.12.2/xercesImpl-2.12.2.jar:/maven/xml-apis/xml-apis/1.4.01/xml-apis-1.4.01.jar -sourcepath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/hadoop-annotations.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/jdiff.jar -apidir /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/site/jdiff/xml -apiname Apache Hadoop MapReduce Common 3.3.4 -->
+<package name="org.apache.hadoop.mapred">
+</package>
+<package name="org.apache.hadoop.mapreduce">
+</package>
+<package name="org.apache.hadoop.mapreduce.v2.api.protocolrecords">
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest -->
+  <interface name="CancelDelegationTokenRequest"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setDelegationToken"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+    </method>
+    <doc>
+    <![CDATA[The request issued by the client to the {@code ResourceManager} to cancel a
+ delegation token.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest -->
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse -->
+  <interface name="CancelDelegationTokenResponse"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <doc>
+    <![CDATA[The response from the {@code ResourceManager} to a cancelDelegationToken
+ request.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse -->
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest -->
+  <interface name="GetDelegationTokenRequest"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getRenewer" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setRenewer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="java.lang.String"/>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest -->
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest -->
+  <interface name="RenewDelegationTokenRequest"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setDelegationToken"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+    </method>
+    <doc>
+    <![CDATA[The request issued by the client to renew a delegation token from
+ the {@code ResourceManager}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest -->
+  <!-- start interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse -->
+  <interface name="RenewDelegationTokenResponse"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getNextExpirationTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setNextExpirationTime"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="expTime" type="long"/>
+    </method>
+    <doc>
+    <![CDATA[The response to a renewDelegationToken call to the {@code ResourceManager}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse -->
+</package>
+<package name="org.apache.hadoop.mapreduce.v2.security">
+</package>
+<package name="org.apache.hadoop.yarn.proto">
+</package>
+
+</api>
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.3.4.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.3.4.xml
new file mode 100644
index 0000000..dc0a88d
--- /dev/null
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.3.4.xml
@@ -0,0 +1,28087 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Fri Jul 29 14:14:39 GMT 2022 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop MapReduce Core 3.3.4"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-annotations.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/jdiff.jar -verbose -classpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/classes:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-yarn-client-3.3.4.jar:/maven/ch/qos/reload4j/reload4j/1.2.22/reload4j-1.2.22.jar:/maven/org/eclipse/jetty/websocket/websocket-client/9.4.43.v20210629/websocket-client-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-client/9.4.43.v20210629/jetty-client-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-io/9.4.43.v20210629/jetty-io-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/websocket/websocket-common/9.4.43.v20210629/websocket-common-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/websocket/websocket-api/9.4.43.v20210629/websocket-api-9.4.43.v20210629.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-3.3.4.jar:/maven/org/jline/jline/3.9.0/jline-3.9.0.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-3.3.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.3.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/9.8.1/nimbus-jose-jwt-9.8.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/2.4.7/json-smart-2.4.7.jar:/maven/net/minidev/accessors-smart/2.4.7/accessors-smart-2.4.7.jar:/maven/org/ow2/asm/asm/5.0.4/asm-5.0.4.jar:/maven/org/apache/curator/curator-framework/4.2.0/curator-framework-4.2.0.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.1/kerb-simplekdc-1.0.1.jar:/maven/org/apache/kerby/kerb-client/1.0.1/kerb-client-1.0.1.jar:/maven/org/apache/kerb
y/kerby-config/1.0.1/kerby-config-1.0.1.jar:/maven/org/apache/kerby/kerb-common/1.0.1/kerb-common-1.0.1.jar:/maven/org/apache/kerby/kerb-crypto/1.0.1/kerb-crypto-1.0.1.jar:/maven/org/apache/kerby/kerb-util/1.0.1/kerb-util-1.0.1.jar:/maven/org/apache/kerby/token-provider/1.0.1/token-provider-1.0.1.jar:/maven/org/apache/kerby/kerb-admin/1.0.1/kerb-admin-1.0.1.jar:/maven/org/apache/kerby/kerb-server/1.0.1/kerb-server-1.0.1.jar:/maven/org/apache/kerby/kerb-identity/1.0.1/kerb-identity-1.0.1.jar:/maven/org/apache/kerby/kerby-xdr/1.0.1/kerby-xdr-1.0.1.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/org/apache/commons/commons-compress/1.21/commons-compress-1.21.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/org/eclipse/jetty/jetty-util/9.4.43.v20210629/jetty-util-9.4.43.v20210629.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-client/1.19/jersey-client-1.19.jar:/maven/commons-io/commons-io/2.8.0/commons-io-2.8.0.jar:/maven/com/google/inject/guice/4.0/guice-4.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.19/jersey-guice-1.19.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.12.7/jackson-core-2.12.7.jar:/maven/com/fasterxml/jackson/module/jackson-module-jaxb-annotations/2.12.7/jackson-module-jaxb-annotations-2.12.7.jar:/maven/jakarta/xml/bind/jakarta.xml.bind-api/2.3.2/jakarta.xml.bind-api-2.3.2.jar:/maven/com/fasterxml/
jackson/jaxrs/jackson-jaxrs-json-provider/2.12.7/jackson-jaxrs-json-provider-2.12.7.jar:/maven/com/fasterxml/jackson/jaxrs/jackson-jaxrs-base/2.12.7/jackson-jaxrs-base-2.12.7.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-3.3.4.jar:/maven/com/squareup/okhttp3/okhttp/4.9.3/okhttp-4.9.3.jar:/maven/com/squareup/okio/okio/2.8.0/okio-2.8.0.jar:/maven/org/jetbrains/kotlin/kotlin-stdlib/1.4.10/kotlin-stdlib-1.4.10.jar:/maven/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.10/kotlin-stdlib-common-1.4.10.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.12.7/jackson-annotations-2.12.7.jar:/maven/org/eclipse/jetty/jetty-server/9.4.43.v20210629/jetty-server-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-http/9.4.43.v20210629/jetty-http-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-util-ajax/9.4.43.v20210629/jetty-util-ajax-9.4.43.v20210629.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.12.7/jackson-databind-2.12.7.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.1.8.2/snappy-java-1.1.8.2.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.3.4.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-protobuf_3_7/1.1.1/hadoop-shaded-protobuf_3_7-1.1.1.jar:/maven/com/google/guava/guava/27.0-jre/guava-27.0-jre.jar:/maven/com/google/guava/failureaccess/1.0/failureaccess-1.0.jar:/maven/com/google/guava/listenablefuture/9999.0-empty-to-avoid-conflict-with-guava/listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar:/maven/org/checkerframework/checker-qual/2.5.2/checker-qual-2.5.2.jar:/maven/com/google/j2objc/j2objc-annotations/1.1/j2objc-annotations-1.1
.jar:/maven/org/codehaus/mojo/animal-sniffer-annotations/1.17/animal-sniffer-annotations-1.17.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/org/apache/httpcomponents/httpclient/4.5.13/httpclient-4.5.13.jar:/maven/org/apache/httpcomponents/httpcore/4.4.13/httpcore-4.4.13.jar:/maven/commons-net/commons-net/3.6/commons-net-3.6.jar:/maven/jakarta/activation/jakarta.activation-api/1.2.1/jakarta.activation-api-1.2.1.jar:/maven/org/eclipse/jetty/jetty-servlet/9.4.43.v20210629/jetty-servlet-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-security/9.4.43.v20210629/jetty-security-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-webapp/9.4.43.v20210629/jetty-webapp-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-xml/9.4.43.v20210629/jetty-xml-9.4.43.v20210629.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/commons-beanutils/commons-beanutils/1.9.4/commons-beanutils-1.9.4.jar:/maven/org/apache/commons/commons-configuration2/2.1.1/commons-configuration2-2.1.1.jar:/maven/org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar:/maven/org/apache/commons/commons-text/1.4/commons-text-1.4.jar:/maven/com/google/re2j/re2j/1.1/re2j-1.1.jar:/maven/com/google/code/gson/gson/2.8.9/gson-2.8.9.jar:/maven/com/jcraft/jsch/0.1.55/jsch-0.1.55.jar:/maven/org/apache/curator/curator-client/4.2.0/curator-client-4.2.0.jar:/maven/org/apache/curator/curator-recipes/4.2.0/curator-recipes-4.2.0.jar:/maven/com/google/code/findbugs/jsr305/3.0.2/jsr305-3.0.2.jar:/maven/org/apache/zookeeper/zookeeper/3.5.6/zookeeper-3.5.6.jar:/maven/org/apache/zookeeper/zookeeper-jute/3.5.6/zookeeper-jute-3.5.6.jar:/maven/org/apache/yetus/audience-annotations/0.5.0/audience-annotations-0.5.0.jar:/maven/org/apache/kerby/kerb-core/1.0.1/kerb-core-1.0.1.jar:/maven/org/apache/kerby/kerby-pkix/1.0.1/kerby-pkix-1.0.1.jar:/maven/org/apache/kerby/kerby-asn1/1.0.1/kerby-asn1-1.0.1.jar:/maven/org/apa
che/kerby/kerby-util/1.0.1/kerby-util-1.0.1.jar:/maven/org/codehaus/woodstox/stax2-api/4.2.1/stax2-api-4.2.1.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.3.0/woodstox-core-5.3.0.jar:/maven/dnsjava/dnsjava/2.1.7/dnsjava-2.1.7.jar:/maven/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar:/maven/org/slf4j/slf4j-reload4j/1.7.36/slf4j-reload4j-1.7.36.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.3.4.jar:/usr/lib/jvm/java-8-openjdk-amd64/lib/tools.jar:/maven/com/google/inject/extensions/guice-servlet/4.0/guice-servlet-4.0.jar:/maven/io/netty/netty/3.10.6.Final/netty-3.10.6.Final.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-guava/1.1.1/hadoop-shaded-guava-1.1.1.jar:/maven/commons-codec/commons-codec/1.15/commons-codec-1.15.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/xerces/xercesImpl/2.12.2/xercesImpl-2.12.2.jar:/maven/xml-apis/xml-apis/1.4.01/xml-apis-1.4.01.jar -sourcepath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-annotations.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/jdiff.jar -apidir /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/site/jdiff/xml -apiname Apache Hadoop MapReduce Core 3.3.4 -->
+<package name="org.apache.hadoop.filecache">
+  <!-- start class org.apache.hadoop.filecache.DistributedCache -->
+  <class name="DistributedCache" extends="org.apache.hadoop.mapreduce.filecache.DistributedCache"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="DistributedCache"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="addLocalArchives"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="str" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a archive that has been localized to the conf.  Used
+ by internal DistributedCache code.
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+      </doc>
+    </method>
+    <method name="addLocalFiles"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="str" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add a file that has been localized to the conf..  Used
+ by internal DistributedCache code.
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+      </doc>
+    </method>
+    <method name="createAllSymlink"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Internal to MapReduce framework.  Use DistributedCacheManager
+ instead.">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="jobCacheDir" type="java.io.File"/>
+      <param name="workDir" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method create symlinks for all files in a given dir in another
+ directory. Currently symlinks cannot be disabled. This is a NO-OP.
+
+ @param conf the configuration
+ @param jobCacheDir the target directory for creating symlinks
+ @param workDir the directory in which the symlinks are created
+ @throws IOException
+ @deprecated Internal to MapReduce framework.  Use DistributedCacheManager
+ instead.]]>
+      </doc>
+    </method>
+    <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="cache" type="java.net.URI"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns {@link FileStatus} of a given cache file on hdfs. Internal to
+ MapReduce.
+ @param conf configuration
+ @param cache cache file
+ @return <code>FileStatus</code> of a given cache file on hdfs
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="cache" type="java.net.URI"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns mtime of a given cache file on hdfs. Internal to MapReduce.
+ @param conf configuration
+ @param cache cache file
+ @return mtime of a given cache file on hdfs
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setArchiveTimestamps"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="timestamps" type="java.lang.String"/>
+      <doc>
+      <![CDATA[This is to check the timestamp of the archives to be localized.
+ Used by internal MapReduce code.
+ @param conf Configuration which stores the timestamp's
+ @param timestamps comma separated list of timestamps of archives.
+ The order should be the same as the order in which the archives are added.]]>
+      </doc>
+    </method>
+    <method name="setFileTimestamps"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="timestamps" type="java.lang.String"/>
+      <doc>
+      <![CDATA[This is to check the timestamp of the files to be localized.
+ Used by internal MapReduce code.
+ @param conf Configuration which stores the timestamp's
+ @param timestamps comma separated list of timestamps of files.
+ The order should be the same as the order in which the files are added.]]>
+      </doc>
+    </method>
+    <method name="setLocalArchives"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="str" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the conf to contain the location for localized archives.  Used
+ by internal DistributedCache code.
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local archives]]>
+      </doc>
+    </method>
+    <method name="setLocalFiles"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="str" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the conf to contain the location for localized files.  Used
+ by internal DistributedCache code.
+ @param conf The conf to modify to contain the localized caches
+ @param str a comma separated list of local files]]>
+      </doc>
+    </method>
+    <field name="CACHE_FILES_SIZES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_FILES_SIZES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_FILES_SIZES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_ARCHIVES_SIZES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_ARCHIVES_SIZES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_ARCHIVES_SIZES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_ARCHIVES_TIMESTAMPS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_ARCHIVES_TIMESTAMPS} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_ARCHIVES_TIMESTAMPS}]]>
+      </doc>
+    </field>
+    <field name="CACHE_FILES_TIMESTAMPS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_FILES_TIMESTAMPS} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_FILE_TIMESTAMPS}]]>
+      </doc>
+    </field>
+    <field name="CACHE_ARCHIVES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_ARCHIVES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_ARCHIVES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_FILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_FILES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_FILES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_LOCALARCHIVES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_LOCALARCHIVES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_LOCALARCHIVES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_LOCALFILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_LOCALFILES} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_LOCALFILES}]]>
+      </doc>
+    </field>
+    <field name="CACHE_SYMLINK" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Warning: {@link #CACHE_SYMLINK} is not a *public* constant.
+ The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#CACHE_SYMLINK}]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Distribute application-specific large, read-only files efficiently.
+
+ <p><code>DistributedCache</code> is a facility provided by the Map-Reduce
+ framework to cache files (text, archives, jars etc.) needed by applications.
+ </p>
+
+ <p>Applications specify the files, via urls (hdfs:// or http://) to be cached
+ via the {@link org.apache.hadoop.mapred.JobConf}. The
+ <code>DistributedCache</code> assumes that the files specified via urls are
+ already present on the {@link FileSystem} at the path specified by the url
+ and are accessible by every machine in the cluster.</p>
+
+ <p>The framework will copy the necessary files on to the worker node before
+ any tasks for the job are executed on that node. Its efficiency stems from
+ the fact that the files are only copied once per job and the ability to
+ cache archives which are un-archived on the workers.</p>
+
+ <p><code>DistributedCache</code> can be used to distribute simple, read-only
+ data/text files and/or more complex types such as archives, jars etc.
+ Archives (zip, tar and tgz/tar.gz files) are un-archived at the worker nodes.
+ Jars may be optionally added to the classpath of the tasks, a rudimentary
+ software distribution mechanism.  Files have execution permissions.
+ In older version of Hadoop Map/Reduce users could optionally ask for symlinks
+ to be created in the working directory of the child task.  In the current
+ version symlinks are always created.  If the URL does not have a fragment
+ the name of the file or directory will be used. If multiple files or
+ directories map to the same link name, the last one added, will be used.  All
+ others will not even be downloaded.</p>
+
+ <p><code>DistributedCache</code> tracks modification timestamps of the cache
+ files. Clearly the cache files should not be modified by the application
+ or externally while the job is executing.</p>
+
+ <p>Here is an illustrative example on how to use the
+ <code>DistributedCache</code>:</p>
+ <p><blockquote><pre>
+     // Setting up the cache for the application
+
+     1. Copy the requisite files to the <code>FileSystem</code>:
+
+     $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat
+     $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip
+     $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar
+     $ bin/hadoop fs -copyFromLocal mytar.tar /myapp/mytar.tar
+     $ bin/hadoop fs -copyFromLocal mytgz.tgz /myapp/mytgz.tgz
+     $ bin/hadoop fs -copyFromLocal mytargz.tar.gz /myapp/mytargz.tar.gz
+
+     2. Setup the application's <code>JobConf</code>:
+
+     JobConf job = new JobConf();
+     DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"),
+                                   job);
+     DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
+     DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job);
+     DistributedCache.addCacheArchive(new URI("/myapp/mytar.tar"), job);
+     DistributedCache.addCacheArchive(new URI("/myapp/mytgz.tgz"), job);
+     DistributedCache.addCacheArchive(new URI("/myapp/mytargz.tar.gz"), job);
+
+     3. Use the cached files in the {@link org.apache.hadoop.mapred.Mapper}
+     or {@link org.apache.hadoop.mapred.Reducer}:
+
+     public static class MapClass extends MapReduceBase
+     implements Mapper&lt;K, V, K, V&gt; {
+
+       private Path[] localArchives;
+       private Path[] localFiles;
+
+       public void configure(JobConf job) {
+         // Get the cached archives/files
+         File f = new File("./map.zip/some/file/in/zip.txt");
+       }
+
+       public void map(K key, V value,
+                       OutputCollector&lt;K, V&gt; output, Reporter reporter)
+       throws IOException {
+         // Use data from the cached archives/files here
+         // ...
+         // ...
+         output.collect(k, v);
+       }
+     }
+
+ </pre></blockquote>
+
+ It is also very common to use the DistributedCache by using
+ {@link org.apache.hadoop.util.GenericOptionsParser}.
+
+ This class includes methods that should be used by users
+ (specifically those mentioned in the example above, as well
+ as {@link DistributedCache#addArchiveToClassPath(Path, Configuration)}),
+ as well as methods intended for use by the MapReduce framework
+ (e.g., {@link org.apache.hadoop.mapred.JobClient}).
+
+ @see org.apache.hadoop.mapred.JobConf
+ @see org.apache.hadoop.mapred.JobClient
+ @see org.apache.hadoop.mapreduce.Job]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.filecache.DistributedCache -->
+</package>
+<package name="org.apache.hadoop.mapred">
+  <!-- start class org.apache.hadoop.mapred.ClusterStatus -->
+  <class name="ClusterStatus" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <method name="getTaskTrackers" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of task trackers in the cluster.
+ 
+ @return the number of task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getActiveTrackerNames" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the names of task trackers in the cluster.
+ 
+ @return the active task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getBlacklistedTrackerNames" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the names of task trackers in the cluster.
+ 
+ @return the blacklisted task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getGraylistedTrackerNames" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the names of graylisted task trackers in the cluster.
+
+ The gray list of trackers is no longer available on M/R 2.x. The function
+ is kept to be compatible with M/R 1.x applications.
+
+ @return an empty graylisted task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getGraylistedTrackers" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of graylisted task trackers in the cluster.
+
+ The gray list of trackers is no longer available on M/R 2.x. The function
+ is kept to be compatible with M/R 1.x applications.
+
+ @return 0 graylisted task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getBlacklistedTrackers" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of blacklisted task trackers in the cluster.
+ 
+ @return the number of blacklisted task trackers in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getNumExcludedNodes" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of excluded hosts in the cluster.
+ @return the number of excluded hosts in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getTTExpiryInterval" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the tasktracker expiry interval for the cluster
+ @return the expiry interval in msec]]>
+      </doc>
+    </method>
+    <method name="getMapTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of currently running map tasks in the cluster.
+ 
+ @return the number of currently running map tasks in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getReduceTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of currently running reduce tasks in the cluster.
+ 
+ @return the number of currently running reduce tasks in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getMaxMapTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the maximum capacity for running map tasks in the cluster.
+ 
+ @return the maximum capacity for running map tasks in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getMaxReduceTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the maximum capacity for running reduce tasks in the cluster.
+ 
+ @return the maximum capacity for running reduce tasks in the cluster.]]>
+      </doc>
+    </method>
+    <method name="getJobTrackerStatus" return="org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the JobTracker's status.
+ 
+ @return {@link JobTrackerStatus} of the JobTracker]]>
+      </doc>
+    </method>
+    <method name="getMaxMemory" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns UNINITIALIZED_MEMORY_VALUE (-1)]]>
+      </doc>
+    </method>
+    <method name="getUsedMemory" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns UNINITIALIZED_MEMORY_VALUE (-1)]]>
+      </doc>
+    </method>
+    <method name="getBlackListedTrackersInfo" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gets the list of blacklisted trackers along with reasons for blacklisting.
+ 
+ @return the collection of {@link BlackListInfo} objects.]]>
+      </doc>
+    </method>
+    <method name="getJobTrackerState" return="org.apache.hadoop.mapred.JobTracker.State"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current state of the <code>JobTracker</code>,
+ as {@link JobTracker.State}
+
+ {@link JobTracker.State} should no longer be used on M/R 2.x. The function
+ is kept to be compatible with M/R 1.x applications.
+
+ @return the invalid state of the <code>JobTracker</code>.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="UNINITIALIZED_MEMORY_VALUE" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Status information on the current state of the Map-Reduce cluster.
+ 
+ <p><code>ClusterStatus</code> provides clients with information such as:
+ <ol>
+   <li>
+   Size of the cluster. 
+   </li>
+   <li>
+   Name of the trackers. 
+   </li>
+   <li>
+   Task capacity of the cluster. 
+   </li>
+   <li>
+   The number of currently running map and reduce tasks.
+   </li>
+   <li>
+   State of the <code>JobTracker</code>.
+   </li>
+   <li>
+   Details regarding black listed trackers.
+   </li>
+ </ol>
+ 
+ <p>Clients can query for the latest <code>ClusterStatus</code>, via 
+ {@link JobClient#getClusterStatus()}.</p>
+ 
+ @see JobClient]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.ClusterStatus -->
+  <!-- start class org.apache.hadoop.mapred.Counters -->
+  <class name="Counters" extends="org.apache.hadoop.mapreduce.counters.AbstractCounters"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Counters"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="Counters" type="org.apache.hadoop.mapreduce.Counters"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getGroup" return="org.apache.hadoop.mapred.Counters.Group"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="groupName" type="java.lang.String"/>
+    </method>
+    <method name="getGroupNames" return="java.util.Collection"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="makeCompactString" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #findCounter(String, String)} instead">
+      <param name="group" type="java.lang.String"/>
+      <param name="id" type="int"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Find a counter by using strings
+ @param group the name of the group
+ @param id the id of the counter within the group (0 to N-1)
+ @param name the internal name of the counter
+ @return the counter for that name
+ @deprecated use {@link #findCounter(String, String)} instead]]>
+      </doc>
+    </method>
+    <method name="incrCounter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Enum"/>
+      <param name="amount" type="long"/>
+      <doc>
+      <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param key identifies a counter
+ @param amount amount by which counter is to be incremented]]>
+      </doc>
+    </method>
+    <method name="incrCounter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+      <param name="counter" type="java.lang.String"/>
+      <param name="amount" type="long"/>
+      <doc>
+      <![CDATA[Increments the specified counter by the specified amount, creating it if
+ it didn't already exist.
+ @param group the name of the group
+ @param counter the internal name of the counter
+ @param amount amount by which counter is to be incremented]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="long"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Enum"/>
+      <doc>
+      <![CDATA[Returns current value of the specified counter, or 0 if the counter
+ does not exist.
+ @param key the counter enum to lookup
+ @return the counter value or 0 if counter not found]]>
+      </doc>
+    </method>
+    <method name="incrAllCounters"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.mapred.Counters"/>
+      <doc>
+      <![CDATA[Increments multiple counters by their amounts in another Counters
+ instance.
+ @param other the other Counters instance]]>
+      </doc>
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #countCounters()} instead">
+      <doc>
+      <![CDATA[@return the total number of counters
+ @deprecated use {@link #countCounters()} instead]]>
+      </doc>
+    </method>
+    <method name="sum" return="org.apache.hadoop.mapred.Counters"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="a" type="org.apache.hadoop.mapred.Counters"/>
+      <param name="b" type="org.apache.hadoop.mapred.Counters"/>
+      <doc>
+      <![CDATA[Convenience method for computing the sum of two sets of counters.
+ @param a the first counters
+ @param b the second counters
+ @return a new summed counters object]]>
+      </doc>
+    </method>
+    <method name="log"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="log" type="org.slf4j.Logger"/>
+      <doc>
+      <![CDATA[Logs the current counter values.
+ @param log The log to use.]]>
+      </doc>
+    </method>
+    <method name="makeEscapedCompactString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Represent the counter in a textual format that can be converted back to
+ its object form
+ @return the string in the following format
+ {(groupName)(group-displayName)[(counterName)(displayName)(value)][]*}*]]>
+      </doc>
+    </method>
+    <method name="fromEscapedCompactString" return="org.apache.hadoop.mapred.Counters"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="compactString" type="java.lang.String"/>
+      <exception name="ParseException" type="java.text.ParseException"/>
+      <doc>
+      <![CDATA[Convert a stringified (by {@link #makeEscapedCompactString()} counter
+ representation into a counter object.
+ @param compactString to parse
+ @return a new counters object
+ @throws ParseException]]>
+      </doc>
+    </method>
+    <field name="MAX_COUNTER_LIMIT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAX_GROUP_LIMIT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A set of named counters.
+
+ <p><code>Counters</code> represent global counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> can be of
+ any {@link Enum} type.</p>
+
+ <p><code>Counters</code> are bunched into {@link Group}s, each comprising of
+ counters from a particular <code>Enum</code> class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.Counters -->
+  <!-- start class org.apache.hadoop.mapred.Counters.Counter -->
+  <class name="Counters.Counter" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.Counter"/>
+    <constructor name="Counter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setDisplayName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="displayName" type="java.lang.String"/>
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDisplayName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getValue" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="long"/>
+    </method>
+    <method name="increment"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="incr" type="long"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="makeEscapedCompactString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the compact stringified version of the counter in the format
+ [(actual-name)(display-name)(value)]
+ @return the stringified result]]>
+      </doc>
+    </method>
+    <method name="contentEquals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="deprecated, no comment">
+      <param name="counter" type="org.apache.hadoop.mapred.Counters.Counter"/>
+      <doc>
+      <![CDATA[Checks for (content) equality of two (basic) counters
+ @param counter to compare
+ @return true if content equals
+ @deprecated]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the value of the counter]]>
+      </doc>
+    </method>
+    <method name="getUnderlyingCounter" return="org.apache.hadoop.mapreduce.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericRight" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A counter record, comprising its name and value.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.Counters.Counter -->
+  <!-- start class org.apache.hadoop.mapred.Counters.Group -->
+  <class name="Counters.Group" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.counters.CounterGroupBase"/>
+    <constructor name="Group"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getCounter" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[@param counterName the name of the counter
+ @return the value of the specified counter, or 0 if the counter does
+ not exist.]]>
+      </doc>
+    </method>
+    <method name="makeEscapedCompactString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the compact stringified version of the group in the format
+ {(actual-name)(display-name)(value)[][][]} where [] are compact strings
+ for the counters within.]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #findCounter(String)} instead">
+      <param name="id" type="int"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the counter for the given id and create it if it doesn't exist.
+ @param id the numeric id of the counter within the group
+ @param name the internal counter name
+ @return the counter
+ @deprecated use {@link #findCounter(String)} instead]]>
+      </doc>
+    </method>
+    <method name="getCounterForName" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the counter for the given name and create it if it doesn't exist.
+ @param name the internal counter name
+ @return the counter]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="iterator" return="java.util.Iterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDisplayName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setDisplayName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="displayName" type="java.lang.String"/>
+    </method>
+    <method name="addCounter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counter" type="org.apache.hadoop.mapred.Counters.Counter"/>
+    </method>
+    <method name="addCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="displayName" type="java.lang.String"/>
+      <param name="value" type="long"/>
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+      <param name="displayName" type="java.lang.String"/>
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+      <param name="create" type="boolean"/>
+    </method>
+    <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="incrAllCounters"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="rightGroup" type="org.apache.hadoop.mapreduce.counters.CounterGroupBase"/>
+    </method>
+    <method name="getUnderlyingGroup" return="org.apache.hadoop.mapreduce.counters.CounterGroupBase"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericRight" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[<code>Group</code> of counters, comprising of counters from a particular
+  counter {@link Enum} class.
+
+  <p><code>Group</code> handles localization of the class name and the
+  counter names.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.Counters.Group -->
+  <!-- start class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+  <class name="FileAlreadyExistsException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileAlreadyExistsException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileAlreadyExistsException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Used when target file already exists for any operation and 
+ is not configured to be overwritten.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileAlreadyExistsException -->
+  <!-- start class org.apache.hadoop.mapred.FileInputFormat -->
+  <class name="FileInputFormat" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.InputFormat"/>
+    <constructor name="FileInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setMinSplitSize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="minSplitSize" type="long"/>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="filename" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Is the given filename splittable? Usually, true, but if the file is
+ stream compressed, it will not be.
+
+ The default implementation in <code>FileInputFormat</code> always returns
+ true. Implementations that may deal with non-splittable files <i>must</i>
+ override this method.
+
+ <code>FileInputFormat</code> implementations can override this and return
+ <code>false</code> to ensure that individual input files are never split-up
+ so that {@link Mapper}s process entire files.
+ 
+ @param fs the file system that the file is on
+ @param filename the file name to check
+ @return is this file splittable?]]>
+      </doc>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setInputPathFilter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="filter" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job.
+
+ @param filter the PathFilter class use for filtering the input paths.]]>
+      </doc>
+    </method>
+    <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get a PathFilter instance of the filter set for the input paths.
+
+ @return the PathFilter instance set for the job, NULL if none has been set.]]>
+      </doc>
+    </method>
+    <method name="addInputPathRecursively"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="result" type="java.util.List"/>
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="inputFilter" type="org.apache.hadoop.fs.PathFilter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add files in the input path recursively into the results.
+ @param result
+          The List to store all files.
+ @param fs
+          The FileSystem.
+ @param path
+          The input path.
+ @param inputFilter
+          The input filter that can be used to filter files/dirs. 
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression. 
+ 
+ If security is enabled, this method collects
+ delegation tokens from the input paths and adds them to the job's
+ credentials.
+ @param job the job to list input paths for and attach tokens to.
+ @return array of FileStatus objects
+ @throws IOException if zero items.]]>
+      </doc>
+    </method>
+    <method name="makeSplit" return="org.apache.hadoop.mapred.FileSplit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="start" type="long"/>
+      <param name="length" type="long"/>
+      <param name="hosts" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[A factory that makes the split for this class. It can be overridden
+ by sub-classes to make sub-types]]>
+      </doc>
+    </method>
+    <method name="makeSplit" return="org.apache.hadoop.mapred.FileSplit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="start" type="long"/>
+      <param name="length" type="long"/>
+      <param name="hosts" type="java.lang.String[]"/>
+      <param name="inMemoryHosts" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[A factory that makes the split for this class. It can be overridden
+ by sub-classes to make sub-types]]>
+      </doc>
+    </method>
+    <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="numSplits" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Splits files returned by {@link #listStatus(JobConf)} when
+ they're too big.]]>
+      </doc>
+    </method>
+    <method name="computeSplitSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="goalSize" type="long"/>
+      <param name="minSize" type="long"/>
+      <param name="blockSize" type="long"/>
+    </method>
+    <method name="getBlockIndex" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+      <param name="offset" type="long"/>
+    </method>
+    <method name="setInputPaths"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="commaSeparatedPaths" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets the given comma separated paths as the list of inputs 
+ for the map-reduce job.
+ 
+ @param conf Configuration of the job
+ @param commaSeparatedPaths Comma separated paths to be set as 
+        the list of inputs for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="addInputPaths"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="commaSeparatedPaths" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add the given comma separated paths to the list of inputs for
+  the map-reduce job.
+ 
+ @param conf The configuration of the job 
+ @param commaSeparatedPaths Comma separated paths to be added to
+        the list of inputs for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="setInputPaths"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/>
+      <doc>
+      <![CDATA[Set the array of {@link Path}s as the list of inputs
+ for the map-reduce job.
+ 
+ @param conf Configuration of the job. 
+ @param inputPaths the {@link Path}s of the input directories/files 
+ for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="addInputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+ 
+ @param conf The configuration of the job 
+ @param path {@link Path} to be added to the list of inputs for 
+            the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+ 
+ @param conf The configuration of the job 
+ @return the list of input {@link Path}s for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getSplitHosts" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+      <param name="offset" type="long"/>
+      <param name="splitSize" type="long"/>
+      <param name="clusterMap" type="org.apache.hadoop.net.NetworkTopology"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This function identifies and returns the hosts that contribute 
+ most for a given split. For calculating the contribution, rack
+ locality is treated on par with host locality, so hosts from racks
+ that contribute the most are preferred over hosts on racks that 
+ contribute less
+ @param blkLocations The list of block locations
+ @param offset 
+ @param splitSize 
+ @return an array of hosts that contribute most to this split
+ @throws IOException]]>
+      </doc>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NUM_INPUT_FILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="INPUT_DIR_RECURSIVE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A base class for file-based {@link InputFormat}.
+ 
+ <p><code>FileInputFormat</code> is the base class for all file-based 
+ <code>InputFormat</code>s. This provides a generic implementation of
+ {@link #getSplits(JobConf, int)}.
+
+ Implementations of <code>FileInputFormat</code> can also override the
+ {@link #isSplitable(FileSystem, Path)} method to prevent input files
+ from being split-up in certain situations. Implementations that may
+ deal with non-splittable files <i>must</i> override this method, since
+ the default implementation assumes splitting is always possible.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.FileOutputCommitter -->
+  <class name="FileOutputCommitter" extends="org.apache.hadoop.mapred.OutputCommitter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileOutputCommitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getWorkPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <param name="outputPath" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="commitJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="cleanupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="abortJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <param name="runState" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setupTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="commitTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="abortTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="needsTaskCommit" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isCommitJobRepeatable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="recoverTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TEMP_DIR_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Temporary directory name]]>
+      </doc>
+    </field>
+    <field name="SUCCEEDED_FILE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An {@link OutputCommitter} that commits files specified 
+ in job output directory i.e. ${mapreduce.output.fileoutputformat.outputdir}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileOutputCommitter -->
+  <!-- start class org.apache.hadoop.mapred.FileOutputFormat -->
+  <class name="FileOutputFormat" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.OutputFormat"/>
+    <constructor name="FileOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setCompressOutput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="compress" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the output of the job is compressed.
+ @param conf the {@link JobConf} to modify
+ @param compress should the output of the job be compressed?]]>
+      </doc>
+    </method>
+    <method name="getCompressOutput" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Is the job output compressed?
+ @param conf the {@link JobConf} to look in
+ @return <code>true</code> if the job output should be compressed,
+         <code>false</code> otherwise]]>
+      </doc>
+    </method>
+    <method name="setOutputCompressorClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="codecClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param conf the {@link JobConf} to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+                   compress the job outputs]]>
+      </doc>
+    </method>
+    <method name="getOutputCompressorClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="defaultValue" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param conf the {@link JobConf} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the 
+         job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+      </doc>
+    </method>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+      <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setOutputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Set the {@link Path} of the output directory for the map-reduce job.
+
+ @param conf The configuration of the job.
+ @param outputDir the {@link Path} of the output directory for 
+ the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the {@link Path} to the output directory for the map-reduce job.
+ 
+ @return the {@link Path} to the output directory for the map-reduce job.
+ @see FileOutputFormat#getWorkOutputPath(JobConf)]]>
+      </doc>
+    </method>
+    <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the {@link Path} to the task's temporary output directory 
+  for the map-reduce job
+  
+ <b id="SideEffectFiles">Tasks' Side-Effect Files</b>
+ 
+ <p><i>Note:</i> The following is valid only if the {@link OutputCommitter}
+  is {@link FileOutputCommitter}. If <code>OutputCommitter</code> is not 
+  a <code>FileOutputCommitter</code>, the task's temporary output
+  directory is same as {@link #getOutputPath(JobConf)} i.e.
+  <tt>${mapreduce.output.fileoutputformat.outputdir}</tt></p>
+  
+ <p>Some applications need to create/write-to side-files, which differ from
+ the actual job-outputs.
+ 
+ <p>In such cases there could be issues with 2 instances of the same TIP 
+ (running simultaneously e.g. speculative tasks) trying to open/write-to the
+ same file (path) on HDFS. Hence the application-writer will have to pick 
+ unique names per task-attempt (e.g. using the attemptid, say 
+ <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p> 
+ 
+ <p>To get around this the Map-Reduce framework helps the application-writer 
+ out by maintaining a special 
+ <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> 
+ sub-directory for each task-attempt on HDFS where the output of the 
+ task-attempt goes. On successful completion of the task-attempt the files 
+ in the <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> (only) 
+ are <i>promoted</i> to <tt>${mapreduce.output.fileoutputformat.outputdir}</tt>. Of course, the 
+ framework discards the sub-directory of unsuccessful task-attempts. This 
+ is completely transparent to the application.</p>
+ 
+ <p>The application-writer can take advantage of this by creating any 
+ side-files required in <tt>${mapreduce.task.output.dir}</tt> during execution 
+ of his reduce-task i.e. via {@link #getWorkOutputPath(JobConf)}, and the 
+ framework will move them out similarly - thus she doesn't have to pick 
+ unique paths per task-attempt.</p>
+ 
+ <p><i>Note</i>: the value of <tt>${mapreduce.task.output.dir}</tt> during 
+ execution of a particular task-attempt is actually 
+ <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt>, and this value is 
+ set by the map-reduce framework. So, just create any side-files in the 
+ path  returned by {@link #getWorkOutputPath(JobConf)} from map/reduce 
+ task to take advantage of this feature.</p>
+ 
+ <p>The entire discussion holds true for maps of jobs with 
+ reducer=NONE (i.e. 0 reduces) since output of the map, in that case, 
+ goes directly to HDFS.</p> 
+ 
+ @return the {@link Path} to the task's temporary output directory 
+ for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getTaskOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Helper function to create the task's temporary output directory and 
+ return the path to the task's output file.
+ 
+ @param conf job-configuration
+ @param name temporary task-output filename
+ @return path to the task's temporary output file
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getUniqueName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Helper function to generate a name that is unique for the task.
+
+ <p>The generated name can be used to create custom files from within the
+ different tasks for the job, the names for different tasks will not collide
+ with each other.</p>
+
+ <p>The given name is postfixed with the task type, 'm' for maps, 'r' for
+ reduces and the task partition number. For example, given a name 'test'
+ running on the first map of the job the generated name will be
+ 'test-m-00000'.</p>
+
+ @param conf the configuration for the job.
+ @param name the name to make unique.
+ @return a unique name across all tasks of the job.]]>
+      </doc>
+    </method>
+    <method name="getPathForCustomFile" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Helper function to generate a {@link Path} for a file that is unique for
+ the task within the job output directory.
+
+ <p>The path can be used to create custom files from within the map and
+ reduce tasks. The path name will be unique for each task. The path parent
+ will be the job output directory.</p>
+
+ <p>This method uses the {@link #getUniqueName} method to make the file name
+ unique for the task.</p>
+
+ @param conf the configuration for the job.
+ @param name the name for the file.
+ @return a unique path across all tasks of the job.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A base class for {@link OutputFormat}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileOutputFormat -->
+  <!-- start class org.apache.hadoop.mapred.FileSplit -->
+  <class name="FileSplit" extends="org.apache.hadoop.mapreduce.InputSplit"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.InputSplitWithLocationInfo"/>
+    <constructor name="FileSplit"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, org.apache.hadoop.mapred.JobConf"
+      static="false" final="false" visibility="public"
+      deprecated="deprecated, no comment">
+      <doc>
+      <![CDATA[Constructs a split.
+ @deprecated
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process]]>
+      </doc>
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null]]>
+      </doc>
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[], java.lang.String[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null
+ @param inMemoryHosts the list of hosts containing the block in memory]]>
+      </doc>
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.mapreduce.lib.input.FileSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The file containing this split's data.]]>
+      </doc>
+    </method>
+    <method name="getStart" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The position of the first byte in the file to process.]]>
+      </doc>
+    </method>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of bytes in the file to process.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getLocations" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getLocationInfo" return="org.apache.hadoop.mapred.SplitLocationInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A section of an input file.  Returned by {@link
+ InputFormat#getSplits(JobConf, int)} and passed to
+ {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FileSplit -->
+  <!-- start class org.apache.hadoop.mapred.FixedLengthInputFormat -->
+  <class name="FixedLengthInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <constructor name="FixedLengthInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setRecordLength"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="recordLength" type="int"/>
+      <doc>
+      <![CDATA[Set the length of each record
+ @param conf configuration
+ @param recordLength the length of a record]]>
+      </doc>
+    </method>
+    <method name="getRecordLength" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get record length value
+ @param conf configuration
+ @return the record length, zero means none was set]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <field name="FIXED_RECORD_LENGTH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[FixedLengthInputFormat is an input format used to read input files
+ which contain fixed length records.  The content of a record need not be
+ text.  It can be arbitrary binary data.  Users must configure the record
+ length property by calling:
+ FixedLengthInputFormat.setRecordLength(conf, recordLength);<br><br> or
+ conf.setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH, recordLength);
+ <br><br>
+ @see FixedLengthRecordReader]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.FixedLengthInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.ID -->
+  <class name="ID" extends="org.apache.hadoop.mapreduce.ID"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ID" type="int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[constructs an ID object from the given int]]>
+      </doc>
+    </constructor>
+    <constructor name="ID"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[A general identifier, which internally stores the id
+ as an integer. This is the super class of {@link JobID}, 
+ {@link TaskID} and {@link TaskAttemptID}.
+ 
+ @see JobID
+ @see TaskID
+ @see TaskAttemptID]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.ID -->
+  <!-- start interface org.apache.hadoop.mapred.InputFormat -->
+  <interface name="InputFormat"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="numSplits" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Logically split the set of input files for the job.  
+ 
+ <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
+ for processing.</p>
+
+ <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
+ input files are not physically split into chunks. For e.g. a split could
+ be <i>&lt;input-file-path, start, offset&gt;</i> tuple.
+ 
+ @param job job configuration.
+ @param numSplits the desired number of splits, a hint.
+ @return an array of {@link InputSplit}s for the job.]]>
+      </doc>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the {@link RecordReader} for the given {@link InputSplit}.
+
+ <p>It is the responsibility of the <code>RecordReader</code> to respect
+ record boundaries while processing the logical split to present a 
+ record-oriented view to the individual task.</p>
+ 
+ @param split the {@link InputSplit}
+ @param job the job that this split belongs to
+ @return a {@link RecordReader}]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<code>InputFormat</code> describes the input-specification for a 
+ Map-Reduce job. 
+ 
+ <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
+ job to:<p>
+ <ol>
+   <li>
+   Validate the input-specification of the job. 
+   <li>
+   Split-up the input file(s) into logical {@link InputSplit}s, each of 
+   which is then assigned to an individual {@link Mapper}.
+   </li>
+   <li>
+   Provide the {@link RecordReader} implementation to be used to glean
+   input records from the logical <code>InputSplit</code> for processing by 
+   the {@link Mapper}.
+   </li>
+ </ol>
+ 
+ <p>The default behavior of file-based {@link InputFormat}s, typically 
+ sub-classes of {@link FileInputFormat}, is to split the 
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in 
+ bytes, of the input files. However, the {@link FileSystem} blocksize of  
+ the input files is treated as an upper bound for input splits. A lower bound 
+ on the split size can be set via 
+ <a href="{@docRoot}/../hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml#mapreduce.input.fileinputformat.split.minsize">
+ mapreduce.input.fileinputformat.split.minsize</a>.</p>
+ 
+ <p>Clearly, logical splits based on input-size is insufficient for many 
+ applications since record boundaries are to be respected. In such cases, the
+ application has to also implement a {@link RecordReader} on whom lies the
+ responsibility to respect record-boundaries and present a record-oriented
+ view of the logical <code>InputSplit</code> to the individual task.
+
+ @see InputSplit
+ @see RecordReader
+ @see JobClient
+ @see FileInputFormat]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.InputFormat -->
+  <!-- start interface org.apache.hadoop.mapred.InputSplit -->
+  <interface name="InputSplit"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <method name="getLength" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the total number of bytes in the data of the <code>InputSplit</code>.
+ 
+ @return the number of bytes in the input split.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getLocations" return="java.lang.String[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the list of hostnames where the input split is located.
+ 
+ @return list of hostnames where data of the <code>InputSplit</code> is
+         located as an array of <code>String</code>s.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<code>InputSplit</code> represents the data to be processed by an 
+ individual {@link Mapper}. 
+
+ <p>Typically, it presents a byte-oriented view on the input and is the 
+ responsibility of {@link RecordReader} of the job to process this and present
+ a record-oriented view.
+ 
+ @see InputFormat
+ @see RecordReader]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.InputSplit -->
+  <!-- start interface org.apache.hadoop.mapred.InputSplitWithLocationInfo -->
+  <interface name="InputSplitWithLocationInfo"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.InputSplit"/>
+    <method name="getLocationInfo" return="org.apache.hadoop.mapred.SplitLocationInfo[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets info about which nodes the input split is stored on and how it is
+ stored at each location.
+ 
+ @return list of <code>SplitLocationInfo</code>s describing how the split
+    data is stored at each location. A null value indicates that all the
+    locations have the data stored on disk.
+ @throws IOException]]>
+      </doc>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.InputSplitWithLocationInfo -->
+  <!-- start class org.apache.hadoop.mapred.InvalidFileTypeException -->
+  <class name="InvalidFileTypeException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InvalidFileTypeException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="InvalidFileTypeException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Used when file type differs from the desired file type. like 
+ getting a file when a directory is expected. Or a wrong file type.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.InvalidFileTypeException -->
+  <!-- start class org.apache.hadoop.mapred.InvalidInputException -->
+  <class name="InvalidInputException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InvalidInputException" type="java.util.List"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create the exception with the given list.
+ The first element of the list is used as the init cause value.
+ @param probs the list of problems to report. this list is not copied.]]>
+      </doc>
+    </constructor>
+    <method name="getProblems" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the complete list of the problems reported.
+ @return the list of problems, which must not be modified]]>
+      </doc>
+    </method>
+    <method name="getMessage" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a summary message of the problems found.
+ @return the concatenated messages from all of the problems.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class wraps a list of problems with the input, so that the user
+ can get a list of problems together instead of finding and fixing them one 
+ by one.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.InvalidInputException -->
+  <!-- start class org.apache.hadoop.mapred.InvalidJobConfException -->
+  <class name="InvalidJobConfException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InvalidJobConfException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="InvalidJobConfException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="InvalidJobConfException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="InvalidJobConfException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This exception is thrown when jobconf misses some mandatory attributes
+ or value of some attributes is invalid.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.InvalidJobConfException -->
+  <!-- start class org.apache.hadoop.mapred.JobClient -->
+  <class name="JobClient" extends="org.apache.hadoop.mapreduce.tools.CLI"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.AutoCloseable"/>
+    <constructor name="JobClient"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job client.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobClient" type="org.apache.hadoop.mapred.JobConf"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Build a job client with the given {@link JobConf}, and connect to the 
+ default cluster
+ 
+ @param conf the job configuration.
+ @throws IOException]]>
+      </doc>
+    </constructor>
+    <constructor name="JobClient" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Build a job client with the given {@link Configuration}, 
+ and connect to the default cluster
+ 
+ @param conf the configuration.
+ @throws IOException]]>
+      </doc>
+    </constructor>
+    <constructor name="JobClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Build a job client, connect to the indicated job tracker.
+ 
+ @param jobTrackAddr the job tracker to connect to.
+ @param conf configuration.]]>
+      </doc>
+    </constructor>
+    <method name="init"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Connect to the default cluster
+ @param conf the job configuration.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close the <code>JobClient</code>.]]>
+      </doc>
+    </method>
+    <method name="getFs" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a filesystem handle.  We need this to prepare jobs
+ for submission to the MapReduce system.
+ 
+ @return the filesystem handle.]]>
+      </doc>
+    </method>
+    <method name="getClusterHandle" return="org.apache.hadoop.mapreduce.Cluster"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a handle to the Cluster]]>
+      </doc>
+    </method>
+    <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobFile" type="java.lang.String"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Submit a job to the MR system.
+ 
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+ 
+ @param jobFile the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+         running-job.
+ @throws FileNotFoundException
+ @throws InvalidJobConfException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Submit a job to the MR system.
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+ 
+ @param conf the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+         running-job.
+ @throws FileNotFoundException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getJobInner" return="org.apache.hadoop.mapred.RunningJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobid" type="org.apache.hadoop.mapred.JobID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get an {@link RunningJob} object to track an ongoing job.  Returns
+ null if the id does not correspond to any known job.
+
+ @param jobid the jobid of the job.
+ @return the {@link RunningJob} handle to track the job, null if the
+         <code>jobid</code> doesn't correspond to any known job.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getJob" return="org.apache.hadoop.mapred.RunningJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Applications should rather use {@link #getJob(JobID)}.">
+      <param name="jobid" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated Applications should rather use {@link #getJob(JobID)}.]]>
+      </doc>
+    </method>
+    <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the information of the current state of the map tasks of a job.
+ 
+ @param jobId the job to query.
+ @return the list of all of the map tips.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Applications should rather use {@link #getMapTaskReports(JobID)}">
+      <param name="jobId" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated Applications should rather use {@link #getMapTaskReports(JobID)}]]>
+      </doc>
+    </method>
+    <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the information of the current state of the reduce tasks of a job.
+ 
+ @param jobId the job to query.
+ @return the list of all of the reduce tips.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getCleanupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the information of the current state of the cleanup tasks of a job.
+ 
+ @param jobId the job to query.
+ @return the list of all of the cleanup tips.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getSetupTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the information of the current state of the setup tasks of a job.
+ 
+ @param jobId the job to query.
+ @return the list of all of the setup tips.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Applications should rather use {@link #getReduceTaskReports(JobID)}">
+      <param name="jobId" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated Applications should rather use {@link #getReduceTaskReports(JobID)}]]>
+      </doc>
+    </method>
+    <method name="displayTasks"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobId" type="org.apache.hadoop.mapred.JobID"/>
+      <param name="type" type="java.lang.String"/>
+      <param name="state" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Display the information about a job's tasks, of a particular type and
+ in a particular state
+ 
+ @param jobId the ID of the job
+ @param type the type of the task (map/reduce/setup/cleanup)
+ @param state the state of the task 
+ (pending/running/completed/failed/killed)
+ @throws IOException when there is an error communicating with the master
+ @throws IllegalArgumentException if an invalid type/state is passed]]>
+      </doc>
+    </method>
+    <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get status information about the Map-Reduce cluster.
+  
+ @return the status information about the Map-Reduce cluster as an object
+         of {@link ClusterStatus}.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="detailed" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get status information about the Map-Reduce cluster.
+  
+ @param  detailed if true then get a detailed status including the
+         tracker names
+ @return the status information about the Map-Reduce cluster as an object
+         of {@link ClusterStatus}.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the jobs that are not completed and not failed.
+ 
+ @return array of {@link JobStatus} for the running/to-be-run jobs.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the jobs that are submitted.
+ 
+ @return array of {@link JobStatus} for the submitted jobs.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="runJob" return="org.apache.hadoop.mapred.RunningJob"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Utility that submits a job, then polls for progress until the job is
+ complete.
+ 
+ @param job the job configuration.
+ @throws IOException if the job fails]]>
+      </doc>
+    </method>
+    <method name="monitorAndPrintJob" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="job" type="org.apache.hadoop.mapred.RunningJob"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Monitor a job and print status in real-time as progress is made and tasks 
+ fail.
+ @param conf the job's configuration
+ @param job the job to track
+ @return true if the job succeeded
+ @throws IOException if communication to the JobTracker fails]]>
+      </doc>
+    </method>
+    <method name="setTaskOutputFilter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+      <doc>
+      <![CDATA[Sets the output filter for tasks. only those tasks are printed whose
+ output matches the filter. 
+ @param newValue task filter.]]>
+      </doc>
+    </method>
+    <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the task output filter out of the JobConf.
+ 
+ @param job the JobConf to examine.
+ @return the filter level.]]>
+      </doc>
+    </method>
+    <method name="setTaskOutputFilter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/>
+      <doc>
+      <![CDATA[Modify the JobConf to set the task output filter.
+ 
+ @param job the JobConf to modify.
+ @param newValue the value to set.]]>
+      </doc>
+    </method>
+    <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns task output filter.
+ @return task filter.]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="cntrs" type="org.apache.hadoop.mapreduce.Counters"/>
+      <param name="counterGroupName" type="java.lang.String"/>
+      <param name="counterName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getDefaultMaps" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get status information about the max available Maps in the cluster.
+  
+ @return the max available Maps in the cluster
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getDefaultReduces" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get status information about the max available Reduces in the cluster.
+  
+ @return the max available Reduces in the cluster
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getSystemDir" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Grab the jobtracker system directory path where job-specific files are to be placed.
+ 
+ @return the system directory where job-specific files are to be placed.]]>
+      </doc>
+    </method>
+    <method name="isJobDirValid" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobDirPath" type="org.apache.hadoop.fs.Path"/>
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Checks if the job directory is clean and has all the required components
+ for (re) starting the job]]>
+      </doc>
+    </method>
+    <method name="getStagingAreaDir" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Fetch the staging area directory for the application
+ 
+ @return path to staging area directory
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRootQueues" return="org.apache.hadoop.mapred.JobQueueInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns an array of queue information objects about root level queues
+ configured
+
+ @return the array of root level JobQueueInfo objects
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getChildQueues" return="org.apache.hadoop.mapred.JobQueueInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns an array of queue information objects about immediate children
+ of queue queueName.
+ 
+ @param queueName
+ @return the array of immediate children JobQueueInfo objects
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueues" return="org.apache.hadoop.mapred.JobQueueInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return an array of queue information objects about all the Job Queues
+ configured.
+ 
+ @return Array of JobQueueInfo objects
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getJobsFromQueue" return="org.apache.hadoop.mapred.JobStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets all the jobs which were added to particular Job Queue
+ 
+ @param queueName name of the Job Queue
+ @return Array of jobs present in the job queue
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueInfo" return="org.apache.hadoop.mapred.JobQueueInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets the queue information associated to a particular Job Queue
+ 
+ @param queueName name of the job queue.
+ @return Queue information associated to particular queue.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueAclsForCurrentUser" return="org.apache.hadoop.mapred.QueueAclsInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets the Queue ACLs for current user
+ @return array of QueueAclsInfo object for current user.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="org.apache.hadoop.io.Text"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get a delegation token for the user from the JobTracker.
+ @param renewer the user who can renew the token
+ @return the new token
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="renewDelegationToken" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link Token#renew} instead">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Renew a delegation token
+ @param token the token to renew
+ @return true if the renewal went well
+ @throws InvalidToken
+ @throws IOException
+ @deprecated Use {@link Token#renew} instead]]>
+      </doc>
+    </method>
+    <method name="cancelDelegationToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link Token#cancel} instead">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Cancel a delegation token from the JobTracker
+ @param token the token to cancel
+ @throws IOException
+ @deprecated Use {@link Token#cancel} instead]]>
+      </doc>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="argv" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <field name="MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAPREDUCE_CLIENT_RETRY_POLICY_ENABLED_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAPREDUCE_CLIENT_RETRY_POLICY_SPEC_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<code>JobClient</code> is the primary interface for the user-job to interact
+ with the cluster.
+ 
+ <code>JobClient</code> provides facilities to submit jobs, track their 
+ progress, access component-tasks' reports/logs, get the Map-Reduce cluster
+ status information etc.
+ 
+ <p>The job submission process involves:
+ <ol>
+   <li>
+   Checking the input and output specifications of the job.
+   </li>
+   <li>
+   Computing the {@link InputSplit}s for the job.
+   </li>
+   <li>
+   Setup the requisite accounting information for the {@link DistributedCache} 
+   of the job, if necessary.
+   </li>
+   <li>
+   Copying the job's jar and configuration to the map-reduce system directory 
+   on the distributed file-system. 
+   </li>
+   <li>
+   Submitting the job to the cluster and optionally monitoring
+   its status.
+   </li>
+ </ol>
+  
+ Normally the user creates the application, describes various facets of the
+ job via {@link JobConf} and then uses the <code>JobClient</code> to submit 
+ the job and monitor its progress.
+ 
+ <p>Here is an example on how to use <code>JobClient</code>:</p>
+ <p><blockquote><pre>
+     // Create a new JobConf
+     JobConf job = new JobConf(new Configuration(), MyJob.class);
+     
+     // Specify various job-specific parameters     
+     job.setJobName("myjob");
+     
+     job.setInputPath(new Path("in"));
+     job.setOutputPath(new Path("out"));
+     
+     job.setMapperClass(MyJob.MyMapper.class);
+     job.setReducerClass(MyJob.MyReducer.class);
+
+     // Submit the job, then poll for progress until the job is complete
+     JobClient.runJob(job);
+ </pre></blockquote>
+ 
+ <b id="JobControl">Job Control</b>
+ 
+ <p>At times clients would chain map-reduce jobs to accomplish complex tasks 
+ which cannot be done via a single map-reduce job. This is fairly easy since 
+ the output of the job, typically, goes to distributed file-system and that 
+ can be used as the input for the next job.</p>
+ 
+ <p>However, this also means that the onus on ensuring jobs are complete 
+ (success/failure) lies squarely on the clients. In such situations the 
+ various job-control options are:
+ <ol>
+   <li>
+   {@link #runJob(JobConf)} : submits the job and returns only after 
+   the job has completed.
+   </li>
+   <li>
+   {@link #submitJob(JobConf)} : only submits the job, then poll the 
+   returned handle to the {@link RunningJob} to query status and make 
+   scheduling decisions.
+   </li>
+   <li>
+   {@link JobConf#setJobEndNotificationURI(String)} : setup a notification
+   on job-completion, thus avoiding polling.
+   </li>
+ </ol>
+ 
+ @see JobConf
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.JobClient -->
+  <!-- start class org.apache.hadoop.mapred.JobConf -->
+  <class name="JobConf" extends="org.apache.hadoop.conf.Configuration"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="JobConf"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a map/reduce job configuration.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobConf" type="java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a map/reduce job configuration.
+ 
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a map/reduce job configuration.
+ 
+ @param conf a Configuration whose settings will be inherited.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration, java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a map/reduce job configuration.
+ 
+ @param conf a Configuration whose settings will be inherited.
+ @param exampleClass a class whose containing jar is used as the job's jar.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobConf" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobConf" type="org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a map/reduce configuration.
+
+ @param config a Configuration-format XML job description file.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobConf" type="boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A new map/reduce configuration where the behavior of reading from the
+ default resources can be turned off.
+ <p>
+ If the parameter {@code loadDefaults} is false, the new instance
+ will not load resources from the default files.
+
+ @param loadDefaults specifies whether to load from the default files]]>
+      </doc>
+    </constructor>
+    <method name="getCredentials" return="org.apache.hadoop.security.Credentials"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get credentials for the job.
+ @return credentials for the job]]>
+      </doc>
+    </method>
+    <method name="getJar" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user jar for the map-reduce job.
+ 
+ @return the user jar for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="setJar"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jar" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the user jar for the map-reduce job.
+ 
+ @param jar the user jar for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getJarUnpackPattern" return="java.util.regex.Pattern"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the pattern for jar contents to unpack on the tasktracker]]>
+      </doc>
+    </method>
+    <method name="setJarByClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cls" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the job's jar file by finding an example class location.
+ 
+ @param cls the example class.]]>
+      </doc>
+    </method>
+    <method name="getLocalDirs" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="deleteLocalFiles"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Use MRAsyncDiskService.moveAndDeleteAllVolumes instead.]]>
+      </doc>
+    </method>
+    <method name="deleteLocalFiles"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="subdir" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getLocalPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pathString" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Constructs a local file name. Files are distributed among configured
+ local directories.]]>
+      </doc>
+    </method>
+    <method name="getUser" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reported username for this job.
+ 
+ @return the username]]>
+      </doc>
+    </method>
+    <method name="setUser"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the reported username for this job.
+ 
+ @param user the username for this job.]]>
+      </doc>
+    </method>
+    <method name="setKeepFailedTaskFiles"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="keep" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the framework should keep the intermediate files for 
+ failed tasks.
+ 
+ @param keep <code>true</code> if framework should keep the intermediate files 
+             for failed tasks, <code>false</code> otherwise.]]>
+      </doc>
+    </method>
+    <method name="getKeepFailedTaskFiles" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Should the temporary files for failed tasks be kept?
+ 
+ @return should the files be kept?]]>
+      </doc>
+    </method>
+    <method name="setKeepTaskFilesPattern"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pattern" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set a regular expression for task names that should be kept. 
+ The regular expression ".*_m_000123_0" would keep the files
+ for the first instance of map 123 that ran.
+ 
+ @param pattern the java.util.regex.Pattern to match against the 
+        task names.]]>
+      </doc>
+    </method>
+    <method name="getKeepTaskFilesPattern" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the regular expression that is matched against the task names
+ to see if we need to keep the files.
+ 
+ @return the pattern as a string, if it was set, otherwise null.]]>
+      </doc>
+    </method>
+    <method name="setWorkingDirectory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Set the current working directory for the default file system.
+ 
+ @param dir the new current working directory.]]>
+      </doc>
+    </method>
+    <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current working directory for the default file system.
+ 
+ @return the directory name.]]>
+      </doc>
+    </method>
+    <method name="setNumTasksToExecutePerJvm"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numTasks" type="int"/>
+      <doc>
+      <![CDATA[Sets the number of tasks that a spawned task JVM should run
+ before it exits
+ @param numTasks the number of tasks to execute; defaults to 1;
+ -1 signifies no limit]]>
+      </doc>
+    </method>
+    <method name="getNumTasksToExecutePerJvm" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of tasks that a spawned JVM should execute]]>
+      </doc>
+    </method>
+    <method name="getInputFormat" return="org.apache.hadoop.mapred.InputFormat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link InputFormat} implementation for the map-reduce job,
+ defaults to {@link TextInputFormat} if not specified explicitly.
+ 
+ @return the {@link InputFormat} implementation for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="setInputFormat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the {@link InputFormat} implementation for the map-reduce job.
+ 
+ @param theClass the {@link InputFormat} implementation for the map-reduce 
+                 job.]]>
+      </doc>
+    </method>
+    <method name="getOutputFormat" return="org.apache.hadoop.mapred.OutputFormat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link OutputFormat} implementation for the map-reduce job,
+ defaults to {@link TextOutputFormat} if not specified explicitly.
+ 
+ @return the {@link OutputFormat} implementation for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getOutputCommitter" return="org.apache.hadoop.mapred.OutputCommitter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link OutputCommitter} implementation for the map-reduce job,
+ defaults to {@link FileOutputCommitter} if not specified explicitly.
+ 
+ @return the {@link OutputCommitter} implementation for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="setOutputCommitter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the {@link OutputCommitter} implementation for the map-reduce job.
+ 
+ @param theClass the {@link OutputCommitter} implementation for the map-reduce 
+                 job.]]>
+      </doc>
+    </method>
+    <method name="setOutputFormat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the {@link OutputFormat} implementation for the map-reduce job.
+ 
+ @param theClass the {@link OutputFormat} implementation for the map-reduce 
+                 job.]]>
+      </doc>
+    </method>
+    <method name="setCompressMapOutput"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="compress" type="boolean"/>
+      <doc>
+      <![CDATA[Should the map outputs be compressed before transfer?
+ 
+ @param compress should the map outputs be compressed?]]>
+      </doc>
+    </method>
+    <method name="getCompressMapOutput" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Are the outputs of the maps to be compressed?
+ 
+ @return <code>true</code> if the outputs of the maps are to be compressed,
+         <code>false</code> otherwise.]]>
+      </doc>
+    </method>
+    <method name="setMapOutputCompressorClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="codecClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the given class as the  {@link CompressionCodec} for the map outputs.
+ 
+ @param codecClass the {@link CompressionCodec} class that will compress  
+                   the map outputs.]]>
+      </doc>
+    </method>
+    <method name="getMapOutputCompressorClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultValue" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the {@link CompressionCodec} for compressing the map outputs.
+ 
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} class that should be used to compress the 
+         map outputs.
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+      </doc>
+    </method>
+    <method name="getMapOutputKeyClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the key class for the map output data. If it is not set, use the
+ (final) output key class. This allows the map output key class to be
+ different than the final output key class.
+  
+ @return the map output key class.]]>
+      </doc>
+    </method>
+    <method name="setMapOutputKeyClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the key class for the map output data. This allows the user to
+ specify the map output key class to be different than the final output
+ value class.
+ 
+ @param theClass the map output key class.]]>
+      </doc>
+    </method>
+    <method name="getMapOutputValueClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the value class for the map output data. If it is not set, use the
+ (final) output value class This allows the map output value class to be
+ different than the final output value class.
+  
+ @return the map output value class.]]>
+      </doc>
+    </method>
+    <method name="setMapOutputValueClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the value class for the map output data. This allows the user to
+ specify the map output value class to be different than the final output
+ value class.
+ 
+ @param theClass the map output value class.]]>
+      </doc>
+    </method>
+    <method name="getOutputKeyClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the key class for the job output data.
+ 
+ @return the key class for the job output data.]]>
+      </doc>
+    </method>
+    <method name="setOutputKeyClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the key class for the job output data.
+ 
+ @param theClass the key class for the job output data.]]>
+      </doc>
+    </method>
+    <method name="getOutputKeyComparator" return="org.apache.hadoop.io.RawComparator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link RawComparator} comparator used to compare keys.
+ 
+ @return the {@link RawComparator} comparator used to compare keys.]]>
+      </doc>
+    </method>
+    <method name="setOutputKeyComparatorClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the {@link RawComparator} comparator used to compare keys.
+ 
+ @param theClass the {@link RawComparator} comparator used to 
+                 compare keys.
+ @see #setOutputValueGroupingComparator(Class)]]>
+      </doc>
+    </method>
+    <method name="setKeyFieldComparatorOptions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="keySpec" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the {@link KeyFieldBasedComparator} options used to compare keys.
+ 
+ @param keySpec the key specification of the form -k pos1[,pos2], where,
+  pos is of the form f[.c][opts], where f is the number
+  of the key field to use, and c is the number of the first character from
+  the beginning of the field. Fields and character posns are numbered 
+  starting with 1; a character position of zero in pos2 indicates the
+  field's last character. If '.c' is omitted from pos1, it defaults to 1
+  (the beginning of the field); if omitted from pos2, it defaults to 0 
+  (the end of the field). opts are ordering options. The supported options
+  are:
+    -n, (Sort numerically)
+    -r, (Reverse the result of comparison)]]>
+      </doc>
+    </method>
+    <method name="getKeyFieldComparatorOption" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link KeyFieldBasedComparator} options]]>
+      </doc>
+    </method>
+    <method name="setKeyFieldPartitionerOptions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="keySpec" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the {@link KeyFieldBasedPartitioner} options used for 
+ {@link Partitioner}
+ 
+ @param keySpec the key specification of the form -k pos1[,pos2], where,
+  pos is of the form f[.c][opts], where f is the number
+  of the key field to use, and c is the number of the first character from
+  the beginning of the field. Fields and character posns are numbered 
+  starting with 1; a character position of zero in pos2 indicates the
+  field's last character. If '.c' is omitted from pos1, it defaults to 1
+  (the beginning of the field); if omitted from pos2, it defaults to 0 
+  (the end of the field).]]>
+      </doc>
+    </method>
+    <method name="getKeyFieldPartitionerOption" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link KeyFieldBasedPartitioner} options]]>
+      </doc>
+    </method>
+    <method name="getCombinerKeyGroupingComparator" return="org.apache.hadoop.io.RawComparator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user defined {@link WritableComparable} comparator for
+ grouping keys of inputs to the combiner.
+
+ @return comparator set by the user for grouping values.
+ @see #setCombinerKeyGroupingComparator(Class) for details.]]>
+      </doc>
+    </method>
+    <method name="getOutputValueGroupingComparator" return="org.apache.hadoop.io.RawComparator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user defined {@link WritableComparable} comparator for 
+ grouping keys of inputs to the reduce.
+ 
+ @return comparator set by the user for grouping values.
+ @see #setOutputValueGroupingComparator(Class) for details.]]>
+      </doc>
+    </method>
+    <method name="setCombinerKeyGroupingComparator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the user defined {@link RawComparator} comparator for
+ grouping keys in the input to the combiner.
+
+ <p>This comparator should be provided if the equivalence rules for keys
+ for sorting the intermediates are different from those for grouping keys
+ before each call to
+ {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
+
+ <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
+ in a single call to the reduce function if K1 and K2 compare as equal.</p>
+
+ <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
+ how keys are sorted, this can be used in conjunction to simulate
+ <i>secondary sort on values</i>.</p>
+
+ <p><i>Note</i>: This is not a guarantee of the combiner sort being
+ <i>stable</i> in any sense. (In any case, with the order of available
+ map-outputs to the combiner being non-deterministic, it wouldn't make
+ that much sense.)</p>
+
+ @param theClass the comparator class to be used for grouping keys for the
+ combiner. It should implement <code>RawComparator</code>.
+ @see #setOutputKeyComparatorClass(Class)]]>
+      </doc>
+    </method>
+    <method name="setOutputValueGroupingComparator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the user defined {@link RawComparator} comparator for 
+ grouping keys in the input to the reduce.
+ 
+ <p>This comparator should be provided if the equivalence rules for keys
+ for sorting the intermediates are different from those for grouping keys
+ before each call to 
+ {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
+  
+ <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
+ in a single call to the reduce function if K1 and K2 compare as equal.</p>
+ 
+ <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control 
+ how keys are sorted, this can be used in conjunction to simulate 
+ <i>secondary sort on values</i>.</p>
+  
+ <p><i>Note</i>: This is not a guarantee of the reduce sort being 
+ <i>stable</i> in any sense. (In any case, with the order of available 
+ map-outputs to the reduce being non-deterministic, it wouldn't make 
+ that much sense.)</p>
+ 
+ @param theClass the comparator class to be used for grouping keys. 
+                 It should implement <code>RawComparator</code>.
+ @see #setOutputKeyComparatorClass(Class)
+ @see #setCombinerKeyGroupingComparator(Class)]]>
+      </doc>
+    </method>
+    <method name="getUseNewMapper" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Should the framework use the new context-object code for running
+ the mapper?
+ @return true, if the new api should be used]]>
+      </doc>
+    </method>
+    <method name="setUseNewMapper"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="flag" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the framework should use the new api for the mapper.
+ This is the default for jobs submitted with the new Job api.
+ @param flag true, if the new api should be used]]>
+      </doc>
+    </method>
+    <method name="getUseNewReducer" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Should the framework use the new context-object code for running
+ the reducer?
+ @return true, if the new api should be used]]>
+      </doc>
+    </method>
+    <method name="setUseNewReducer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="flag" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the framework should use the new api for the reducer. 
+ This is the default for jobs submitted with the new Job api.
+ @param flag true, if the new api should be used]]>
+      </doc>
+    </method>
+    <method name="getOutputValueClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the value class for job outputs.
+ 
+ @return the value class for job outputs.]]>
+      </doc>
+    </method>
+    <method name="setOutputValueClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the value class for job outputs.
+ 
+ @param theClass the value class for job outputs.]]>
+      </doc>
+    </method>
+    <method name="getMapperClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link Mapper} class for the job.
+ 
+ @return the {@link Mapper} class for the job.]]>
+      </doc>
+    </method>
+    <method name="setMapperClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the {@link Mapper} class for the job.
+ 
+ @param theClass the {@link Mapper} class for the job.]]>
+      </doc>
+    </method>
+    <method name="getMapRunnerClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link MapRunnable} class for the job.
+ 
+ @return the {@link MapRunnable} class for the job.]]>
+      </doc>
+    </method>
+    <method name="setMapRunnerClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Expert: Set the {@link MapRunnable} class for the job.
+ 
+ Typically used to exert greater control on {@link Mapper}s.
+ 
+ @param theClass the {@link MapRunnable} class for the job.]]>
+      </doc>
+    </method>
+    <method name="getPartitionerClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link Partitioner} used to partition {@link Mapper}-outputs 
+ to be sent to the {@link Reducer}s.
+ 
+ @return the {@link Partitioner} used to partition map-outputs.]]>
+      </doc>
+    </method>
+    <method name="setPartitionerClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the {@link Partitioner} class used to partition 
+ {@link Mapper}-outputs to be sent to the {@link Reducer}s.
+ 
+ @param theClass the {@link Partitioner} used to partition map-outputs.]]>
+      </doc>
+    </method>
+    <method name="getReducerClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link Reducer} class for the job.
+ 
+ @return the {@link Reducer} class for the job.]]>
+      </doc>
+    </method>
+    <method name="setReducerClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the {@link Reducer} class for the job.
+ 
+ @param theClass the {@link Reducer} class for the job.]]>
+      </doc>
+    </method>
+    <method name="getCombinerClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user-defined <i>combiner</i> class used to combine map-outputs 
+ before being sent to the reducers. Typically the combiner is the same as
+ the {@link Reducer} for the job i.e. {@link #getReducerClass()}.
+ 
+ @return the user-defined combiner class used to combine map-outputs.]]>
+      </doc>
+    </method>
+    <method name="setCombinerClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the user-defined <i>combiner</i> class used to combine map-outputs 
+ before being sent to the reducers. 
+ 
+ <p>The combiner is an application-specified aggregation operation, which
+ can help cut down the amount of data transferred between the 
+ {@link Mapper} and the {@link Reducer}, leading to better performance.</p>
+ 
+ <p>The framework may invoke the combiner 0, 1, or multiple times, in both
+ the mapper and reducer tasks. In general, the combiner is called as the
+ sort/merge result is written to disk. The combiner must:
+ <ul>
+   <li> be side-effect free</li>
+   <li> have the same input and output key types and the same input and 
+        output value types</li>
+ </ul>
+ 
+ <p>Typically the combiner is the same as the <code>Reducer</code> for the  
+ job i.e. {@link #setReducerClass(Class)}.</p>
+ 
+ @param theClass the user-defined combiner class used to combine 
+                 map-outputs.]]>
+      </doc>
+    </method>
+    <method name="getSpeculativeExecution" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Should speculative execution be used for this job? 
+ Defaults to <code>true</code>.
+ 
+ @return <code>true</code> if speculative execution be used for this job,
+         <code>false</code> otherwise.]]>
+      </doc>
+    </method>
+    <method name="setSpeculativeExecution"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="speculativeExecution" type="boolean"/>
+      <doc>
+      <![CDATA[Turn speculative execution on or off for this job. 
+ 
+ @param speculativeExecution <code>true</code> if speculative execution 
+                             should be turned on, else <code>false</code>.]]>
+      </doc>
+    </method>
+    <method name="getMapSpeculativeExecution" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Should speculative execution be used for this job for map tasks? 
+ Defaults to <code>true</code>.
+ 
+ @return <code>true</code> if speculative execution be 
+                           used for this job for map tasks,
+         <code>false</code> otherwise.]]>
+      </doc>
+    </method>
+    <method name="setMapSpeculativeExecution"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="speculativeExecution" type="boolean"/>
+      <doc>
+      <![CDATA[Turn speculative execution on or off for this job for map tasks. 
+ 
+ @param speculativeExecution <code>true</code> if speculative execution 
+                             should be turned on for map tasks,
+                             else <code>false</code>.]]>
+      </doc>
+    </method>
+    <method name="getReduceSpeculativeExecution" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Should speculative execution be used for this job for reduce tasks? 
+ Defaults to <code>true</code>.
+ 
+ @return <code>true</code> if speculative execution be used 
+                           for reduce tasks for this job,
+         <code>false</code> otherwise.]]>
+      </doc>
+    </method>
+    <method name="setReduceSpeculativeExecution"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="speculativeExecution" type="boolean"/>
+      <doc>
+      <![CDATA[Turn speculative execution on or off for this job for reduce tasks. 
+ 
+ @param speculativeExecution <code>true</code> if speculative execution 
+                             should be turned on for reduce tasks,
+                             else <code>false</code>.]]>
+      </doc>
+    </method>
+    <method name="getNumMapTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the configured number of map tasks for this job.
+ Defaults to <code>1</code>.
+ 
+ @return the number of map tasks for this job.]]>
+      </doc>
+    </method>
+    <method name="setNumMapTasks"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="n" type="int"/>
+      <doc>
+      <![CDATA[Set the number of map tasks for this job.
+ 
+ <p><i>Note</i>: This is only a <i>hint</i> to the framework. The actual 
+ number of spawned map tasks depends on the number of {@link InputSplit}s 
+ generated by the job's {@link InputFormat#getSplits(JobConf, int)}.
+  
+ A custom {@link InputFormat} is typically used to accurately control 
+ the number of map tasks for the job.</p>
+ 
+ <b id="NoOfMaps">How many maps?</b>
+ 
+ <p>The number of maps is usually driven by the total size of the inputs 
+ i.e. total number of blocks of the input files.</p>
+  
+ <p>The right level of parallelism for maps seems to be around 10-100 maps 
+ per-node, although it has been set up to 300 or so for very cpu-light map 
+ tasks. Task setup takes a while, so it is best if the maps take at least a 
+ minute to execute.</p>
+ 
+ <p>The default behavior of file-based {@link InputFormat}s is to split the 
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in 
+ bytes, of input files. However, the {@link FileSystem} blocksize of the 
+ input files is treated as an upper bound for input splits. A lower bound 
+ on the split size can be set via 
+ <a href="{@docRoot}/../hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml#mapreduce.input.fileinputformat.split.minsize">
+ mapreduce.input.fileinputformat.split.minsize</a>.</p>
+  
+ <p>Thus, if you expect 10TB of input data and have a blocksize of 128MB, 
+ you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is 
+ used to set it even higher.</p>
+ 
+ @param n the number of map tasks for this job.
+ @see InputFormat#getSplits(JobConf, int)
+ @see FileInputFormat
+ @see FileSystem#getDefaultBlockSize()
+ @see FileStatus#getBlockSize()]]>
+      </doc>
+    </method>
+    <method name="getNumReduceTasks" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the configured number of reduce tasks for this job. Defaults to
+ <code>1</code>.
+
+ @return the number of reduce tasks for this job.]]>
+      </doc>
+    </method>
+    <method name="setNumReduceTasks"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="n" type="int"/>
+      <doc>
+      <![CDATA[Set the requisite number of reduce tasks for this job.
+ 
+ <b id="NoOfReduces">How many reduces?</b>
+ 
+ <p>The right number of reduces seems to be <code>0.95</code> or 
+ <code>1.75</code> multiplied by (
+ <i>available memory for reduce tasks</i>
+ (The value of this should be smaller than
+ numNodes * yarn.nodemanager.resource.memory-mb
+ since the resource of memory is shared by map tasks and other
+ applications) /
+ <a href="{@docRoot}/../hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml#mapreduce.reduce.memory.mb">
+ mapreduce.reduce.memory.mb</a>).
+ </p>
+ 
+ <p>With <code>0.95</code> all of the reduces can launch immediately and 
+ start transferring map outputs as the maps finish. With <code>1.75</code> 
+ the faster nodes will finish their first round of reduces and launch a 
+ second wave of reduces doing a much better job of load balancing.</p>
+ 
+ <p>Increasing the number of reduces increases the framework overhead, but 
+ increases load balancing and lowers the cost of failures.</p>
+ 
+ <p>The scaling factors above are slightly less than whole numbers to 
+ reserve a few reduce slots in the framework for speculative-tasks, failures
+ etc.</p> 
+
+ <b id="ReducerNone">Reducer NONE</b>
+ 
+ <p>It is legal to set the number of reduce-tasks to <code>zero</code>.</p>
+ 
+ <p>In this case the output of the map-tasks directly go to distributed 
+ file-system, to the path set by 
+ {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Also, the 
+ framework doesn't sort the map-outputs before writing it out to HDFS.</p>
+ 
+ @param n the number of reduce tasks for this job.]]>
+      </doc>
+    </method>
+    <method name="getMaxMapAttempts" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the configured number of maximum attempts that will be made to run a
+ map task, as specified by the <code>mapreduce.map.maxattempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+  
+ @return the max number of attempts per map task.]]>
+      </doc>
+    </method>
+    <method name="setMaxMapAttempts"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="n" type="int"/>
+      <doc>
+      <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ map task.
+ 
+ @param n the number of attempts per map task.]]>
+      </doc>
+    </method>
+    <method name="getMaxReduceAttempts" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the configured number of maximum attempts  that will be made to run a
+ reduce task, as specified by the <code>mapreduce.reduce.maxattempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+ 
+ @return the max number of attempts per reduce task.]]>
+      </doc>
+    </method>
+    <method name="setMaxReduceAttempts"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="n" type="int"/>
+      <doc>
+      <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ reduce task.
+ 
+ @param n the number of attempts per reduce task.]]>
+      </doc>
+    </method>
+    <method name="getJobName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user-specified job name. This is only used to identify the 
+ job to the user.
+ 
+ @return the job's name, defaulting to "".]]>
+      </doc>
+    </method>
+    <method name="setJobName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the user-specified job name.
+ 
+ @param name the job's new name.]]>
+      </doc>
+    </method>
+    <method name="getSessionId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user-specified session identifier. The default is the empty string.
+
+ The session identifier is used to tag metric data that is reported to some
+ performance metrics system via the org.apache.hadoop.metrics API.  The 
+ session identifier is intended, in particular, for use by Hadoop-On-Demand 
+ (HOD) which allocates a virtual Hadoop cluster dynamically and transiently. 
+ HOD will set the session identifier by modifying the mapred-site.xml file 
+ before starting the cluster.
+
+ When not running under HOD, this identifier is expected to remain set to 
+ the empty string.
+
+ @return the session identifier, defaulting to "".]]>
+      </doc>
+    </method>
+    <method name="setSessionId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="sessionId" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the user-specified session identifier.  
+
+ @param sessionId the new session id.]]>
+      </doc>
+    </method>
+    <method name="setMaxTaskFailuresPerTracker"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="noFailures" type="int"/>
+      <doc>
+      <![CDATA[Set the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds <code>noFailures</code>, the 
+ tasktracker is <i>blacklisted</i> for this job. 
+ 
+ @param noFailures maximum no. of failures of a given job per tasktracker.]]>
+      </doc>
+    </method>
+    <method name="getMaxTaskFailuresPerTracker" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Expert: Get the maximum no. of failures of a given job per tasktracker.
+ If the no. of task failures exceeds this, the tasktracker is
+ <i>blacklisted</i> for this job. 
+ 
+ @return the maximum no. of failures of a given job per tasktracker.]]>
+      </doc>
+    </method>
+    <method name="getMaxMapTaskFailuresPercent" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the maximum percentage of map tasks that can fail without 
+ the job being aborted. 
+ 
+ Each map task is executed a minimum of {@link #getMaxMapAttempts()} 
+ attempts before being declared as <i>failed</i>.
+  
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed map-task results in
+ the job being declared as {@link JobStatus#FAILED}.
+ 
+ @return the maximum percentage of map tasks that can fail without
+         the job being aborted.]]>
+      </doc>
+    </method>
+    <method name="setMaxMapTaskFailuresPercent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="percent" type="int"/>
+      <doc>
+      <![CDATA[Expert: Set the maximum percentage of map tasks that can fail without the
+ job being aborted. 
+ 
+ Each map task is executed a minimum of {@link #getMaxMapAttempts} attempts 
+ before being declared as <i>failed</i>.
+ 
+ @param percent the maximum percentage of map tasks that can fail without 
+                the job being aborted.]]>
+      </doc>
+    </method>
+    <method name="getMaxReduceTaskFailuresPercent" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the maximum percentage of reduce tasks that can fail without 
+ the job being aborted. 
+ 
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()} 
+ attempts before being declared as <i>failed</i>.
+ 
+ Defaults to <code>zero</code>, i.e. <i>any</i> failed reduce-task results 
+ in the job being declared as {@link JobStatus#FAILED}.
+ 
+ @return the maximum percentage of reduce tasks that can fail without
+         the job being aborted.]]>
+      </doc>
+    </method>
+    <method name="setMaxReduceTaskFailuresPercent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="percent" type="int"/>
+      <doc>
+      <![CDATA[Set the maximum percentage of reduce tasks that can fail without the job
+ being aborted.
+ 
+ Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()} 
+ attempts before being declared as <i>failed</i>.
+ 
+ @param percent the maximum percentage of reduce tasks that can fail without 
+                the job being aborted.]]>
+      </doc>
+    </method>
+    <method name="setJobPriority"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="prio" type="org.apache.hadoop.mapred.JobPriority"/>
+      <doc>
+      <![CDATA[Set {@link JobPriority} for this job.
+
+ @param prio the {@link JobPriority} for this job.]]>
+      </doc>
+    </method>
+    <method name="setJobPriorityAsInteger"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="prio" type="int"/>
+      <doc>
+      <![CDATA[Set {@link JobPriority} for this job.
+
+ @param prio the {@link JobPriority} for this job.]]>
+      </doc>
+    </method>
+    <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link JobPriority} for this job.
+
+ @return the {@link JobPriority} for this job.]]>
+      </doc>
+    </method>
+    <method name="getJobPriorityAsInteger" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the priority for this job.
+
+ @return the priority for this job.]]>
+      </doc>
+    </method>
+    <method name="getProfileEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get whether the task profiling is enabled.
+ @return true if some tasks will be profiled]]>
+      </doc>
+    </method>
+    <method name="setProfileEnabled"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newValue" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the system should collect profiler information for some of 
+ the tasks in this job? The information is stored in the user log 
+ directory.
+ @param newValue true means it should be gathered]]>
+      </doc>
+    </method>
+    <method name="getProfileParams" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the profiler configuration arguments.
+
+ The default value for this property is
+ "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s"
+ 
+ @return the parameters to pass to the task child to configure profiling]]>
+      </doc>
+    </method>
+    <method name="setProfileParams"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the profiler configuration arguments. If the string contains a '%s' it
+ will be replaced with the name of the profiling output file when the task
+ runs.
+
+ This value is passed to the task child JVM on the command line.
+
+ @param value the configuration string]]>
+      </doc>
+    </method>
+    <method name="getProfileTaskRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="isMap" type="boolean"/>
+      <doc>
+      <![CDATA[Get the range of maps or reduces to profile.
+ @param isMap is the task a map?
+ @return the task ranges]]>
+      </doc>
+    </method>
+    <method name="setProfileTaskRange"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="isMap" type="boolean"/>
+      <param name="newValue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the ranges of maps or reduces to profile. setProfileEnabled(true) 
+ must also be called.
+ @param newValue a set of integer ranges of the map ids]]>
+      </doc>
+    </method>
+    <method name="setMapDebugScript"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="mDbgScript" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the debug script to run when the map tasks fail.
+ 
+ <p>The debug script can aid debugging of failed map tasks. The script is 
+ given task's stdout, stderr, syslog, jobconf files as arguments.</p>
+ 
+ <p>The debug command, run on the node where the map failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf.
+ </pre></blockquote>
+ 
+ <p> The script file is distributed through {@link DistributedCache} 
+ APIs. The script needs to be symlinked. </p>
+ 
+ <p>Here is an example on how to submit a script 
+ <p><blockquote><pre>
+ job.setMapDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote>
+ 
+ @param mDbgScript the script name]]>
+      </doc>
+    </method>
+    <method name="getMapDebugScript" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the map task's debug script.
+ 
+ @return the debug Script for the mapred job for failed map tasks.
+ @see #setMapDebugScript(String)]]>
+      </doc>
+    </method>
+    <method name="setReduceDebugScript"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="rDbgScript" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the debug script to run when the reduce tasks fail.
+ 
+ <p>The debug script can aid debugging of failed reduce tasks. The script
+ is given task's stdout, stderr, syslog, jobconf files as arguments.</p>
+ 
+ <p>The debug command, run on the node where the reduce failed, is:</p>
+ <p><blockquote><pre>
+ $script $stdout $stderr $syslog $jobconf.
+ </pre></blockquote>
+ 
+ <p> The script file is distributed through {@link DistributedCache} 
+ APIs. The script file needs to be symlinked </p>
+ 
+ <p>Here is an example on how to submit a script 
+ <p><blockquote><pre>
+ job.setReduceDebugScript("./myscript");
+ DistributedCache.createSymlink(job);
+ DistributedCache.addCacheFile("/debug/scripts/myscript#myscript");
+ </pre></blockquote>
+ 
+ @param rDbgScript the script name]]>
+      </doc>
+    </method>
+    <method name="getReduceDebugScript" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reduce task's debug Script
+ 
+ @return the debug script for the mapred job for failed reduce tasks.
+ @see #setReduceDebugScript(String)]]>
+      </doc>
+    </method>
+    <method name="getJobEndNotificationURI" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the uri to be invoked in-order to send a notification after the job 
+ has completed (success/failure). 
+ 
+ @return the job end notification uri, <code>null</code> if it hasn't
+         been set.
+ @see #setJobEndNotificationURI(String)]]>
+      </doc>
+    </method>
+    <method name="setJobEndNotificationURI"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the uri to be invoked in-order to send a notification after the job
+ has completed (success/failure).
+ 
+ <p>The uri can contain 2 special parameters: <tt>$jobId</tt> and 
+ <tt>$jobStatus</tt>. Those, if present, are replaced by the job's 
+ identifier and completion-status respectively.</p>
+ 
+ <p>This is typically used by application-writers to implement chaining of 
+ Map-Reduce jobs in an <i>asynchronous manner</i>.</p>
+ 
+ @param uri the job end notification uri
+ @see JobStatus]]>
+      </doc>
+    </method>
+    <method name="getJobEndNotificationCustomNotifierClass" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the class to be invoked in order to send a notification
+ after the job has completed (success/failure).
+
+ @return the fully-qualified name of the class which implements
+ {@link org.apache.hadoop.mapreduce.CustomJobEndNotifier} set through the
+ {@link org.apache.hadoop.mapreduce.MRJobConfig#MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS}
+ property
+
+ @see JobConf#setJobEndNotificationCustomNotifierClass(java.lang.String)
+ @see org.apache.hadoop.mapreduce.MRJobConfig#MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS]]>
+      </doc>
+    </method>
+    <method name="setJobEndNotificationCustomNotifierClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="customNotifierClassName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets the class to be invoked in order to send a notification after the job
+ has completed (success/failure).
+
+ A notification url still has to be set which will be passed to
+ {@link org.apache.hadoop.mapreduce.CustomJobEndNotifier#notifyOnce(
+ java.net.URL, org.apache.hadoop.conf.Configuration)}
+ along with the Job's conf.
+
+ If this is set instead of using a simple HttpURLConnection
+ we'll create a new instance of this class
+ which should be an implementation of
+ {@link org.apache.hadoop.mapreduce.CustomJobEndNotifier},
+ and we'll invoke that.
+
+ @param customNotifierClassName the fully-qualified name of the class
+     which implements
+     {@link org.apache.hadoop.mapreduce.CustomJobEndNotifier}
+
+ @see JobConf#setJobEndNotificationURI(java.lang.String)
+ @see
+ org.apache.hadoop.mapreduce.MRJobConfig#MR_JOB_END_NOTIFICATION_CUSTOM_NOTIFIER_CLASS]]>
+      </doc>
+    </method>
+    <method name="getJobLocalDir" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get job-specific shared directory for use as scratch space
+ 
+ <p>
+ When a job starts, a shared directory is created at location
+ <code>
+ ${mapreduce.cluster.local.dir}/taskTracker/$user/jobcache/$jobid/work/ </code>.
+ This directory is exposed to the users through 
+ <code>mapreduce.job.local.dir </code>.
+ So, the tasks can use this space 
+ as scratch space and share files among them. </p>
+ This value is available as System property also.
+ 
+ @return The localized job specific shared directory]]>
+      </doc>
+    </method>
+    <method name="getMemoryForMapTask" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get memory required to run a map task of the job, in MB.
+ 
+ If a value is specified in the configuration, it is returned.
+ Else, it returns {@link JobContext#DEFAULT_MAP_MEMORY_MB}.
+ <p>
+ For backward compatibility, if the job configuration sets the
+ key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
+ from {@link #DISABLED_MEMORY_LIMIT}, that value will be used
+ after converting it from bytes to MB.
+ @return memory required to run a map task of the job, in MB.]]>
+      </doc>
+    </method>
+    <method name="setMemoryForMapTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="mem" type="long"/>
+    </method>
+    <method name="getMemoryForReduceTask" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get memory required to run a reduce task of the job, in MB.
+ 
+ If a value is specified in the configuration, it is returned.
+ Else, it returns {@link JobContext#DEFAULT_REDUCE_MEMORY_MB}.
+ <p>
+ For backward compatibility, if the job configuration sets the
+ key {@link #MAPRED_TASK_MAXVMEM_PROPERTY} to a value different
+ from {@link #DISABLED_MEMORY_LIMIT}, that value will be used
+ after converting it from bytes to MB.
+ @return memory required to run a reduce task of the job, in MB.]]>
+      </doc>
+    </method>
+    <method name="setMemoryForReduceTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="mem" type="long"/>
+    </method>
+    <method name="getQueueName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the name of the queue to which this job is submitted.
+ Defaults to 'default'.
+ 
+ @return name of the queue]]>
+      </doc>
+    </method>
+    <method name="setQueueName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the name of the queue to which this job should be submitted.
+ 
+ @param queueName Name of the queue]]>
+      </doc>
+    </method>
+    <method name="normalizeMemoryConfigValue" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="long"/>
+      <doc>
+      <![CDATA[Normalize the negative values in configuration
+ 
+ @param val
+ @return normalized value]]>
+      </doc>
+    </method>
+    <method name="findContainingJar" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="my_class" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Find a jar that contains a class of the same name, if any.
+ It will return a jar file, even if that is not the first thing
+ on the class path that has a class with the same name.
+ 
+ @param my_class the class to find.
+ @return a jar file that contains the class, or null.]]>
+      </doc>
+    </method>
+    <method name="getMaxVirtualMemoryForTask" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getMemoryForMapTask()} and
+             {@link #getMemoryForReduceTask()}">
+      <doc>
+      <![CDATA[Get the memory required to run a task of this job, in bytes. See
+ {@link #MAPRED_TASK_MAXVMEM_PROPERTY}
+ <p>
+ This method is deprecated. Now, different memory limits can be
+ set for map and reduce tasks of a job, in MB. 
+ <p>
+ For backward compatibility, if the job configuration sets the
+ key {@link #MAPRED_TASK_MAXVMEM_PROPERTY}, that value is returned. 
+ Otherwise, this method will return the larger of the values returned by 
+ {@link #getMemoryForMapTask()} and {@link #getMemoryForReduceTask()}
+ after converting them into bytes.
+
+ @return Memory required to run a task of this job, in bytes.
+ @see #setMaxVirtualMemoryForTask(long)
+ @deprecated Use {@link #getMemoryForMapTask()} and
+             {@link #getMemoryForReduceTask()}]]>
+      </doc>
+    </method>
+    <method name="setMaxVirtualMemoryForTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #setMemoryForMapTask(long mem)}  and
+  Use {@link #setMemoryForReduceTask(long mem)}">
+      <param name="vmem" type="long"/>
+      <doc>
+      <![CDATA[Set the maximum amount of memory any task of this job can use. See
+ {@link #MAPRED_TASK_MAXVMEM_PROPERTY}
+ <p>
+ mapred.task.maxvmem is split into
+ mapreduce.map.memory.mb
+ and mapreduce.reduce.memory.mb;
+ each of the new keys is set
+ as mapred.task.maxvmem / 1024
+ as new values are in MB
+
+ @param vmem Maximum amount of virtual memory in bytes any task of this job
+             can use.
+ @see #getMaxVirtualMemoryForTask()
+ @deprecated
+  Use {@link #setMemoryForMapTask(long mem)}  and
+  Use {@link #setMemoryForReduceTask(long mem)}]]>
+      </doc>
+    </method>
+    <method name="getMaxPhysicalMemoryForTask" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="this variable is deprecated and no longer in use.">
+      <doc>
+      <![CDATA[@deprecated this variable is deprecated and no longer in use.]]>
+      </doc>
+    </method>
+    <method name="setMaxPhysicalMemoryForTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="mem" type="long"/>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <field name="MAPRED_TASK_MAXVMEM_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Use {@link #MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY} and
+ {@link #MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY}">
+      <doc>
+      <![CDATA[@deprecated Use {@link #MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY} and
+ {@link #MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY}]]>
+      </doc>
+    </field>
+    <field name="UPPER_LIMIT_ON_TASK_VMEM_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="deprecated, no comment">
+      <doc>
+      <![CDATA[@deprecated]]>
+      </doc>
+    </field>
+    <field name="MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="deprecated, no comment">
+      <doc>
+      <![CDATA[@deprecated]]>
+      </doc>
+    </field>
+    <field name="MAPRED_TASK_MAXPMEM_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="deprecated, no comment">
+      <doc>
+      <![CDATA[@deprecated]]>
+      </doc>
+    </field>
+    <field name="DISABLED_MEMORY_LIMIT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A value which if set for memory related configuration options,
+ indicates that the options are turned off.
+ Deprecated because it makes no sense in the context of MR2.]]>
+      </doc>
+    </field>
+    <field name="MAPRED_LOCAL_DIR_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Property name for the configuration property mapreduce.cluster.local.dir]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_QUEUE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Name of the queue to which jobs will be submitted, if no queue
+ name is mentioned.]]>
+      </doc>
+    </field>
+    <field name="MAPRED_JOB_MAP_MEMORY_MB_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The variable is kept for M/R 1.x applications, while M/R 2.x applications
+ should use {@link #MAPREDUCE_JOB_MAP_MEMORY_MB_PROPERTY}]]>
+      </doc>
+    </field>
+    <field name="MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The variable is kept for M/R 1.x applications, while M/R 2.x applications
+ should use {@link #MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY}]]>
+      </doc>
+    </field>
+    <field name="UNPACK_JAR_PATTERN_DEFAULT" type="java.util.regex.Pattern"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Pattern for the default unpacking behavior for job jars]]>
+      </doc>
+    </field>
+    <field name="MAPRED_TASK_JAVA_OPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Use {@link #MAPRED_MAP_TASK_JAVA_OPTS} or 
+                 {@link #MAPRED_REDUCE_TASK_JAVA_OPTS}">
+      <doc>
+      <![CDATA[Configuration key to set the java command line options for the child
+ map and reduce tasks.
+ 
+ Java opts for the task tracker child processes.
+ The following symbol, if present, will be interpolated: @taskid@. 
+ It is replaced by current TaskID. Any other occurrences of '@' will go 
+ unchanged.
+ For example, to enable verbose gc logging to a file named for the taskid in
+ /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
+          -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
+ 
+ The configuration variable {@link #MAPRED_TASK_ENV} can be used to pass 
+ other environment variables to the child processes.
+ 
+ @deprecated Use {@link #MAPRED_MAP_TASK_JAVA_OPTS} or 
+                 {@link #MAPRED_REDUCE_TASK_JAVA_OPTS}]]>
+      </doc>
+    </field>
+    <field name="MAPRED_MAP_TASK_JAVA_OPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Configuration key to set the java command line options for the map tasks.
+ 
+ Java opts for the task tracker child map processes.
+ The following symbol, if present, will be interpolated: @taskid@. 
+ It is replaced by current TaskID. Any other occurrences of '@' will go 
+ unchanged.
+ For example, to enable verbose gc logging to a file named for the taskid in
+ /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
+          -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
+ 
+ The configuration variable {@link #MAPRED_MAP_TASK_ENV} can be used to pass 
+ other environment variables to the map processes.]]>
+      </doc>
+    </field>
+    <field name="MAPRED_REDUCE_TASK_JAVA_OPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Configuration key to set the java command line options for the reduce tasks.
+ 
+ Java opts for the task tracker child reduce processes.
+ The following symbol, if present, will be interpolated: @taskid@. 
+ It is replaced by current TaskID. Any other occurrences of '@' will go 
+ unchanged.
+ For example, to enable verbose gc logging to a file named for the taskid in
+ /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
+          -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
+ 
+ The configuration variable {@link #MAPRED_REDUCE_TASK_ENV} can be used to 
+ pass process environment variables to the reduce processes.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_MAPRED_TASK_JAVA_OPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAPRED_TASK_ULIMIT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Configuration key to set the maximum virtual memory available to the child
+ map and reduce tasks (in kilo-bytes). This has been deprecated and will no
+ longer have any effect.">
+      <doc>
+      <![CDATA[@deprecated
+ Configuration key to set the maximum virtual memory available to the child
+ map and reduce tasks (in kilo-bytes). This has been deprecated and will no
+ longer have any effect.]]>
+      </doc>
+    </field>
+    <field name="MAPRED_MAP_TASK_ULIMIT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Configuration key to set the maximum virtual memory available to the
+ map tasks (in kilo-bytes). This has been deprecated and will no
+ longer have any effect.">
+      <doc>
+      <![CDATA[@deprecated
+ Configuration key to set the maximum virtual memory available to the
+ map tasks (in kilo-bytes). This has been deprecated and will no
+ longer have any effect.]]>
+      </doc>
+    </field>
+    <field name="MAPRED_REDUCE_TASK_ULIMIT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Configuration key to set the maximum virtual memory available to the
+ reduce tasks (in kilo-bytes). This has been deprecated and will no
+ longer have any effect.">
+      <doc>
+      <![CDATA[@deprecated
+ Configuration key to set the maximum virtual memory available to the
+ reduce tasks (in kilo-bytes). This has been deprecated and will no
+ longer have any effect.]]>
+      </doc>
+    </field>
+    <field name="MAPRED_TASK_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Use {@link #MAPRED_MAP_TASK_ENV} or 
+                 {@link #MAPRED_REDUCE_TASK_ENV}">
+      <doc>
+      <![CDATA[Configuration key to set the environment of the child map/reduce tasks.
+ 
+ The format of the value is <code>k1=v1,k2=v2</code>. Further it can 
+ reference existing environment variables via <code>$key</code> on
+ Linux or <code>%key%</code> on Windows.
+ 
+ Example:
+ <ul>
+   <li> A=foo - This will set the env variable A to foo. </li>
+ </ul>
+ 
+ @deprecated Use {@link #MAPRED_MAP_TASK_ENV} or 
+                 {@link #MAPRED_REDUCE_TASK_ENV}]]>
+      </doc>
+    </field>
+    <field name="MAPRED_MAP_TASK_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Configuration key to set the environment of the child map tasks.
+ 
+ The format of the value is <code>k1=v1,k2=v2</code>. Further it can
+ reference existing environment variables via <code>$key</code> on
+ Linux or <code>%key%</code> on Windows.
+ 
+ Example:
+ <ul>
+   <li> A=foo - This will set the env variable A to foo. </li>
+ </ul>
+
+ You can also add environment variables individually by appending
+ <code>.VARNAME</code> to this configuration key, where VARNAME is
+ the name of the environment variable.
+
+ Example:
+ <ul>
+   <li>mapreduce.map.env.VARNAME=value</li>
+ </ul>]]>
+      </doc>
+    </field>
+    <field name="MAPRED_REDUCE_TASK_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Configuration key to set the environment of the child reduce tasks.
+ 
+ The format of the value is <code>k1=v1,k2=v2</code>. Further it can 
+ reference existing environment variables via <code>$key</code> on
+ Linux or <code>%key%</code> on Windows.
+ 
+ Example:
+ <ul>
+   <li> A=foo - This will set the env variable A to foo. </li>
+ </ul>
+
+ You can also add environment variables individually by appending
+ <code>.VARNAME</code> to this configuration key, where VARNAME is
+ the name of the environment variable.
+
+ Example:
+ <ul>
+   <li>mapreduce.reduce.env.VARNAME=value</li>
+ </ul>]]>
+      </doc>
+    </field>
+    <field name="MAPRED_MAP_TASK_LOG_LEVEL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Configuration key to set the logging level for the map task.
+
+ The allowed logging levels are:
+ OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.]]>
+      </doc>
+    </field>
+    <field name="MAPRED_REDUCE_TASK_LOG_LEVEL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Configuration key to set the logging level for the reduce task.
+
+ The allowed logging levels are:
+ OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_LOG_LEVEL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default logging level for map/reduce tasks.]]>
+      </doc>
+    </field>
+    <field name="WORKFLOW_ID" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#WORKFLOW_ID} instead]]>
+      </doc>
+    </field>
+    <field name="WORKFLOW_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#WORKFLOW_NAME} instead]]>
+      </doc>
+    </field>
+    <field name="WORKFLOW_NODE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#WORKFLOW_NODE_NAME} instead]]>
+      </doc>
+    </field>
+    <field name="WORKFLOW_ADJACENCY_PREFIX_STRING" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#WORKFLOW_ADJACENCY_PREFIX_STRING} instead]]>
+      </doc>
+    </field>
+    <field name="WORKFLOW_ADJACENCY_PREFIX_PATTERN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#WORKFLOW_ADJACENCY_PREFIX_PATTERN} instead]]>
+      </doc>
+    </field>
+    <field name="WORKFLOW_TAGS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ use {@link MRJobConfig#WORKFLOW_TAGS} instead]]>
+      </doc>
+    </field>
+    <field name="MAPREDUCE_RECOVER_JOB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ not use it]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_MAPREDUCE_RECOVER_JOB" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The variable is kept for M/R 1.x applications, M/R 2.x applications should
+ not use it]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A map/reduce job configuration.
+ 
+ <p><code>JobConf</code> is the primary interface for a user to describe a 
+ map-reduce job to the Hadoop framework for execution. The framework tries to
+ faithfully execute the job as-is described by <code>JobConf</code>, however:
+ <ol>
+   <li>
+   Some configuration parameters might have been marked as 
+   <a href="{@docRoot}/org/apache/hadoop/conf/Configuration.html#FinalParams">
+   final</a> by administrators and hence cannot be altered.
+   </li>
+   <li>
+   While some job parameters are straight-forward to set 
+   (e.g. {@link #setNumReduceTasks(int)}), some parameters interact subtly
+   with the rest of the framework and/or job-configuration and is relatively
+   more complex for the user to control finely
+   (e.g. {@link #setNumMapTasks(int)}).
+   </li>
+ </ol>
+ 
+ <p><code>JobConf</code> typically specifies the {@link Mapper}, combiner 
+ (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and 
+ {@link OutputFormat} implementations to be used etc.
+
+ <p>Optionally <code>JobConf</code> is used to specify other advanced facets 
+ of the job such as <code>Comparator</code>s to be used, files to be put in  
+ the {@link DistributedCache}, whether or not intermediate and/or job outputs 
+ are to be compressed (and how), debuggability via user-provided scripts 
+ ( {@link #setMapDebugScript(String)}/{@link #setReduceDebugScript(String)}),
+ for doing post-processing on task logs, tasks' stdout, stderr, syslog,
+ etc.</p>
+ 
+ <p>Here is an example of how to configure a job via <code>JobConf</code>:</p>
+ <p><blockquote><pre>
+     // Create a new JobConf
+     JobConf job = new JobConf(new Configuration(), MyJob.class);
+     
+     // Specify various job-specific parameters     
+     job.setJobName("myjob");
+     
+     FileInputFormat.setInputPaths(job, new Path("in"));
+     FileOutputFormat.setOutputPath(job, new Path("out"));
+     
+     job.setMapperClass(MyJob.MyMapper.class);
+     job.setCombinerClass(MyJob.MyReducer.class);
+     job.setReducerClass(MyJob.MyReducer.class);
+     
+     job.setInputFormat(SequenceFileInputFormat.class);
+     job.setOutputFormat(SequenceFileOutputFormat.class);
+ </pre></blockquote>
+ 
+ @see JobClient
+ @see ClusterStatus
+ @see Tool
+ @see DistributedCache]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.JobConf -->
+  <!-- start interface org.apache.hadoop.mapred.JobConfigurable -->
+  <interface name="JobConfigurable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="configure"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Initializes a new instance from a {@link JobConf}.
+
+ @param job the configuration]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Something that may be configured with a {@link JobConf}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.JobConfigurable -->
+  <!-- start interface org.apache.hadoop.mapred.JobContext -->
+  <interface name="JobContext"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.JobContext"/>
+    <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the job Configuration
+ 
+ @return JobConf]]>
+      </doc>
+    </method>
+    <method name="getProgressible" return="org.apache.hadoop.util.Progressable"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the progress mechanism for reporting progress.
+ 
+ @return progress mechanism]]>
+      </doc>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.JobContext -->
+  <!-- start class org.apache.hadoop.mapred.JobID -->
+  <class name="JobID" extends="org.apache.hadoop.mapreduce.JobID"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="JobID" type="java.lang.String, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a JobID object 
+ @param jtIdentifier jobTracker identifier
+ @param id job number]]>
+      </doc>
+    </constructor>
+    <constructor name="JobID"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="downgrade" return="org.apache.hadoop.mapred.JobID"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="old" type="org.apache.hadoop.mapreduce.JobID"/>
+      <doc>
+      <![CDATA[Downgrade a new JobID to an old one
+ @param old a new or old JobID
+ @return either old or a new JobID built to match old]]>
+      </doc>
+    </method>
+    <method name="read" return="org.apache.hadoop.mapred.JobID"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="forName" return="org.apache.hadoop.mapred.JobID"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="str" type="java.lang.String"/>
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <doc>
+      <![CDATA[Construct a JobID object from the given string.
+ @return constructed JobID object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+      </doc>
+    </method>
+    <method name="getJobIDsPattern" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jtIdentifier" type="java.lang.String"/>
+      <param name="jobId" type="java.lang.Integer"/>
+      <doc>
+      <![CDATA[Returns a regex pattern which matches job IDs. Arguments can 
+ be given null, in which case that part of the regex will be generic.  
+ For example to obtain a regex matching <i>any job</i> 
+ run on the jobtracker started at <i>200707121733</i>, we would use :
+ <pre> 
+ JobID.getJobIDsPattern("200707121733", null);
+ </pre>
+ which will return :
+ <pre> "job_200707121733_[0-9]*" </pre> 
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @return a regex pattern matching JobIDs]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[JobID represents the immutable and unique identifier for 
+ the job. JobID consists of two parts. First part 
+ represents the jobtracker identifier, so that jobID to jobtracker map 
+ is defined. For cluster setup this string is the jobtracker 
+ start time, for local setting, it is "local".
+ Second part of the JobID is the job number. <br> 
+ An example JobID is : 
+ <code>job_200707121733_0003</code> , which represents the third job 
+ running at the jobtracker started at <code>200707121733</code>. 
+ <p>
+ Applications should never construct or parse JobID strings, but rather 
+ use appropriate constructors or {@link #forName(String)} method. 
+ 
+ @see TaskID
+ @see TaskAttemptID]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.JobID -->
+  <!-- start class org.apache.hadoop.mapred.JobPriority -->
+  <class name="JobPriority" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.mapred.JobPriority[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.mapred.JobPriority"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Used to describe the priority of the running job. 
+ DEFAULT : While submitting a job, if the user is not specifying priority,
+ YARN has the capability to pick the default priority as per its config.
+ Hence MapReduce can indicate such cases with this new enum.
+ UNDEFINED_PRIORITY : YARN supports priority as an integer. Hence other than
+ the five defined enums, YARN can consider other integers also. To generalize
+ such cases, this specific enum is used.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.JobPriority -->
+  <!-- start class org.apache.hadoop.mapred.JobQueueInfo -->
+  <class name="JobQueueInfo" extends="org.apache.hadoop.mapreduce.QueueInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="JobQueueInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor for Job Queue Info.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobQueueInfo" type="java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a new JobQueueInfo object using the queue name and the
+ scheduling information passed.
+ 
+ @param queueName Name of the job queue
+ @param schedulingInfo Scheduling Information associated with the job
+ queue]]>
+      </doc>
+    </constructor>
+    <method name="getQueueState" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Use getState() instead]]>
+      </doc>
+    </method>
+    <method name="getChildren" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Class that contains the information regarding the Job Queues which are 
+ maintained by the Hadoop Map/Reduce framework.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.JobQueueInfo -->
+  <!-- start class org.apache.hadoop.mapred.JobStatus -->
+  <class name="JobStatus" extends="org.apache.hadoop.mapreduce.JobStatus"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="JobStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param runState The current state of the job]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, int, org.apache.hadoop.mapred.JobPriority"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param runState The current state of the job
+ @param jp Priority of the job.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, float, int, org.apache.hadoop.mapred.JobPriority"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, int, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on cleanup
+ @param runState The current state of the job
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param jobFile job configuration file. 
+ @param trackingUrl link to the web-ui for details of the job.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, int, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param runState The current state of the job
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param jobFile job configuration file. 
+ @param trackingUrl link to the web-ui for details of the job.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, int, org.apache.hadoop.mapred.JobPriority, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param runState The current state of the job
+ @param jp Priority of the job.
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param jobFile job configuration file. 
+ @param trackingUrl link to the web-ui for details of the job.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, float, int, org.apache.hadoop.mapred.JobPriority, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param jobFile job configuration file.
+ @param trackingUrl link to the web-ui for details of the job.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, float, int, org.apache.hadoop.mapred.JobPriority, java.lang.String, java.lang.String, java.lang.String, java.lang.String, boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param jobFile job configuration file.
+ @param trackingUrl link to the web-ui for details of the job.
+ @param isUber Whether job running in uber mode]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, float, int, org.apache.hadoop.mapred.JobPriority, java.lang.String, java.lang.String, java.lang.String, java.lang.String, boolean, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param jobFile job configuration file.
+ @param trackingUrl link to the web-ui for details of the job.
+ @param isUber Whether job running in uber mode
+ @param historyFile history file]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, float, int, org.apache.hadoop.mapred.JobPriority, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param queue job queue name.
+ @param jobFile job configuration file.
+ @param trackingUrl link to the web-ui for details of the job.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, float, int, org.apache.hadoop.mapred.JobPriority, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param queue job queue name.
+ @param jobFile job configuration file. 
+ @param trackingUrl link to the web-ui for details of the job.
+ @param isUber Whether job running in uber mode]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapred.JobID, float, float, float, float, int, org.apache.hadoop.mapred.JobPriority, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, boolean, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param queue job queue name.
+ @param jobFile job configuration file.
+ @param trackingUrl link to the web-ui for details of the job.
+ @param isUber Whether job running in uber mode
+ @param historyFile history file]]>
+      </doc>
+    </constructor>
+    <method name="getJobRunState" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="state" type="int"/>
+      <doc>
+      <![CDATA[Helper method to get human-readable state of the job.
+ @param state job state
+ @return human-readable state of the job]]>
+      </doc>
+    </method>
+    <method name="downgrade" return="org.apache.hadoop.mapred.JobStatus"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="stat" type="org.apache.hadoop.mapreduce.JobStatus"/>
+    </method>
+    <method name="getJobId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use getJobID instead">
+      <doc>
+      <![CDATA[@deprecated use getJobID instead]]>
+      </doc>
+    </method>
+    <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return The jobid of the Job]]>
+      </doc>
+    </method>
+    <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the priority of the job
+ @return job priority]]>
+      </doc>
+    </method>
+    <method name="setMapProgress"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="p" type="float"/>
+      <doc>
+      <![CDATA[Sets the map progress of this job
+ @param p The value of map progress to set to]]>
+      </doc>
+    </method>
+    <method name="setCleanupProgress"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="p" type="float"/>
+      <doc>
+      <![CDATA[Sets the cleanup progress of this job
+ @param p The value of cleanup progress to set to]]>
+      </doc>
+    </method>
+    <method name="setSetupProgress"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="p" type="float"/>
+      <doc>
+      <![CDATA[Sets the setup progress of this job
+ @param p The value of setup progress to set to]]>
+      </doc>
+    </method>
+    <method name="setReduceProgress"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="p" type="float"/>
+      <doc>
+      <![CDATA[Sets the reduce progress of this Job
+ @param p The value of reduce progress to set to]]>
+      </doc>
+    </method>
+    <method name="setFinishTime"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="finishTime" type="long"/>
+      <doc>
+      <![CDATA[Set the finish time of the job
+ @param finishTime The finishTime of the job]]>
+      </doc>
+    </method>
+    <method name="setHistoryFile"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="historyFile" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the job history file url for a completed job]]>
+      </doc>
+    </method>
+    <method name="setTrackingUrl"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="trackingUrl" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the link to the web-ui for details of the job.]]>
+      </doc>
+    </method>
+    <method name="setRetired"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Set the job retire flag to true.]]>
+      </doc>
+    </method>
+    <method name="getRunState" return="int"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return running state of the job]]>
+      </doc>
+    </method>
+    <method name="setStartTime"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="startTime" type="long"/>
+      <doc>
+      <![CDATA[Set the start time of the job
+ @param startTime The startTime of the job]]>
+      </doc>
+    </method>
+    <method name="setUsername"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="userName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[@param userName The username of the job]]>
+      </doc>
+    </method>
+    <method name="setJobACLs"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="acls" type="java.util.Map"/>
+    </method>
+    <method name="setFailureInfo"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="failureInfo" type="java.lang.String"/>
+    </method>
+    <method name="setJobPriority"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jp" type="org.apache.hadoop.mapred.JobPriority"/>
+      <doc>
+      <![CDATA[Set the priority of the job, defaulting to NORMAL.
+ @param jp new job priority]]>
+      </doc>
+    </method>
+    <method name="mapProgress" return="float"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Percentage of progress in maps]]>
+      </doc>
+    </method>
+    <method name="cleanupProgress" return="float"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Percentage of progress in cleanup]]>
+      </doc>
+    </method>
+    <method name="setupProgress" return="float"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Percentage of progress in setup]]>
+      </doc>
+    </method>
+    <method name="reduceProgress" return="float"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Percentage of progress in reduce]]>
+      </doc>
+    </method>
+    <field name="RUNNING" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SUCCEEDED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FAILED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="PREP" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="KILLED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Describes the current status of a job.  This is
+ not intended to be a comprehensive piece of data.
+ For that, look at JobProfile.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.JobStatus -->
+  <!-- start class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+  <class name="KeyValueLineRecordReader" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.RecordReader"/>
+    <constructor name="KeyValueLineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getKeyClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createKey" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createValue" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="findSeparator" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="utf" type="byte[]"/>
+      <param name="start" type="int"/>
+      <param name="length" type="int"/>
+      <param name="sep" type="byte"/>
+    </method>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.io.Text"/>
+      <param name="value" type="org.apache.hadoop.io.Text"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read key/value pair in a line.]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getPos" return="long"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[This class treats a line in the input as a key/value pair separated by a 
+ separator character. The separator can be specified in config file 
+ under the attribute name mapreduce.input.keyvaluelinerecordreader.key.value.separator. The default
+ separator is the tab character ('\t').]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.KeyValueLineRecordReader -->
+  <!-- start class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
+  <class name="KeyValueTextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <constructor name="KeyValueTextInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either linefeed or carriage-return are used to signal end of line. Each line
+ is divided into key and value parts by a separator byte. If no such a byte
+ exists, the key will be the entire line and value will be empty.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.KeyValueTextInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.MapFileOutputFormat -->
+  <class name="MapFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MapFileOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getReaders" return="org.apache.hadoop.io.MapFile.Reader[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="dir" type="org.apache.hadoop.fs.Path"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Open the output generated by this format.]]>
+      </doc>
+    </method>
+    <method name="getEntry" return="org.apache.hadoop.io.Writable"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="readers" type="org.apache.hadoop.io.MapFile.Reader[]"/>
+      <param name="partitioner" type="org.apache.hadoop.mapred.Partitioner"/>
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get an entry from output generated by this class.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[An {@link OutputFormat} that writes {@link MapFile}s.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.MapFileOutputFormat -->
+  <!-- start interface org.apache.hadoop.mapred.Mapper -->
+  <interface name="Mapper"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <implements name="org.apache.hadoop.io.Closeable"/>
+    <method name="map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K1"/>
+      <param name="value" type="V1"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Maps a single input key/value pair into an intermediate key/value pair.
+ 
+ <p>Output pairs need not be of the same types as input pairs.  A given 
+ input pair may map to zero or many output pairs.  Output pairs are 
+ collected with calls to 
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress 
+ or just indicate that they are alive. In scenarios where the application 
+ takes significant amount of time to process individual key/value
+ pairs, this is crucial since the framework might assume that the task has 
+ timed-out and kill that task. The other way of avoiding this is to set 
+ <a href="{@docRoot}/../hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml#mapreduce.task.timeout">
+ mapreduce.task.timeout</a> to a high-enough value (or even zero for no 
+ time-outs).</p>
+ 
+ @param key the input key.
+ @param value the input value.
+ @param output collects mapped keys and values.
+ @param reporter facility to report progress.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Maps input key/value pairs to a set of intermediate key/value pairs.  
+ 
+ <p>Maps are the individual tasks which transform input records into a 
+ intermediate records. The transformed intermediate records need not be of 
+ the same type as the input records. A given input pair may map to zero or 
+ many output pairs.</p> 
+ 
+ <p>The Hadoop Map-Reduce framework spawns one map task for each 
+ {@link InputSplit} generated by the {@link InputFormat} for the job.
+ <code>Mapper</code> implementations can access the {@link JobConf} for the 
+ job via the {@link JobConfigurable#configure(JobConf)} and initialize
+ themselves. Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+ 
+ <p>The framework then calls 
+ {@link #map(Object, Object, OutputCollector, Reporter)} 
+ for each key/value pair in the <code>InputSplit</code> for that task.</p>
+ 
+ <p>All intermediate values associated with a given output key are 
+ subsequently grouped by the framework, and passed to a {@link Reducer} to  
+ determine the final output. Users can control the grouping by specifying
+ a <code>Comparator</code> via 
+ {@link JobConf#setOutputKeyComparatorClass(Class)}.</p>
+
+ <p>The grouped <code>Mapper</code> outputs are partitioned per 
+ <code>Reducer</code>. Users can control which keys (and hence records) go to 
+ which <code>Reducer</code> by implementing a custom {@link Partitioner}.
+ 
+ <p>Users can optionally specify a <code>combiner</code>, via 
+ {@link JobConf#setCombinerClass(Class)}, to perform local aggregation of the 
+ intermediate outputs, which helps to cut down the amount of data transferred 
+ from the <code>Mapper</code> to the <code>Reducer</code>.
+ 
+ <p>The intermediate, grouped outputs are always stored in 
+ {@link SequenceFile}s. Applications can specify if and how the intermediate
+ outputs are to be compressed and which {@link CompressionCodec}s are to be
+ used via the <code>JobConf</code>.</p>
+  
+ <p>If the job has 
+ <a href="{@docRoot}/org/apache/hadoop/mapred/JobConf.html#ReducerNone">zero
+ reduces</a> then the output of the <code>Mapper</code> is directly written
+ to the {@link FileSystem} without grouping by keys.</p>
+ 
+ <p>Example:</p>
+ <p><blockquote><pre>
+     public class MyMapper&lt;K extends WritableComparable, V extends Writable&gt; 
+     extends MapReduceBase implements Mapper&lt;K, V, K, V&gt; {
+     
+       static enum MyCounters { NUM_RECORDS }
+       
+       private String mapTaskId;
+       private String inputFile;
+       private int noRecords = 0;
+       
+       public void configure(JobConf job) {
+         mapTaskId = job.get(JobContext.TASK_ATTEMPT_ID);
+         inputFile = job.get(JobContext.MAP_INPUT_FILE);
+       }
+       
+       public void map(K key, V val,
+                       OutputCollector&lt;K, V&gt; output, Reporter reporter)
+       throws IOException {
+         // Process the &lt;key, value&gt; pair (assume this takes a while)
+         // ...
+         // ...
+         
+         // Let the framework know that we are alive, and kicking!
+         // reporter.progress();
+         
+         // Process some more
+         // ...
+         // ...
+         
+         // Increment the no. of &lt;key, value&gt; pairs processed
+         ++noRecords;
+
+         // Increment counters
+         reporter.incrCounter(NUM_RECORDS, 1);
+        
+         // Every 100 records update application-level status
+         if ((noRecords%100) == 0) {
+           reporter.setStatus(mapTaskId + " processed " + noRecords + 
+                              " from input-file: " + inputFile); 
+         }
+         
+         // Output the result
+         output.collect(key, val);
+       }
+     }
+ </pre></blockquote>
+
+ <p>Applications may write a custom {@link MapRunnable} to exert greater
+ control on map processing e.g. multi-threaded <code>Mapper</code>s etc.</p>
+ 
+ @see JobConf
+ @see InputFormat
+ @see Partitioner  
+ @see Reducer
+ @see MapReduceBase
+ @see MapRunnable
+ @see SequenceFile]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.Mapper -->
+  <!-- start class org.apache.hadoop.mapred.MapReduceBase -->
+  <class name="MapReduceBase" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Closeable"/>
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <constructor name="MapReduceBase"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Default implementation that does nothing.]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Default implementation that does nothing.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Base class for {@link Mapper} and {@link Reducer} implementations.
+ 
+ <p>Provides default no-op implementations for a few methods, most non-trivial
+ applications need to override some of them.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.MapReduceBase -->
+  <!-- start interface org.apache.hadoop.mapred.MapRunnable -->
+  <interface name="MapRunnable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <method name="run"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="input" type="org.apache.hadoop.mapred.RecordReader"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Start mapping input <tt>&lt;key, value&gt;</tt> pairs.
+  
+ <p>Mapping of input records to output records is complete when this method 
+ returns.</p>
+ 
+ @param input the {@link RecordReader} to read the input records.
+ @param output the {@link OutputCollector} to collect the outputrecords.
+ @param reporter {@link Reporter} to report progress, status-updates etc.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Expert: Generic interface for {@link Mapper}s.
+ 
+ <p>Custom implementations of <code>MapRunnable</code> can exert greater 
+ control on map processing e.g. multi-threaded, asynchronous mappers etc.</p>
+ 
+ @see Mapper]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.MapRunnable -->
+  <!-- start class org.apache.hadoop.mapred.MapRunner -->
+  <class name="MapRunner" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.MapRunnable"/>
+    <constructor name="MapRunner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="run"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="input" type="org.apache.hadoop.mapred.RecordReader"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getMapper" return="org.apache.hadoop.mapred.Mapper"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Default {@link MapRunnable} implementation.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.MapRunner -->
+  <!-- start class org.apache.hadoop.mapred.MultiFileInputFormat -->
+  <class name="MultiFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MultiFileInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="numSplits" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[An abstract {@link InputFormat} that returns {@link MultiFileSplit}'s
+ in {@link #getSplits(JobConf, int)} method. Splits are constructed from 
+ the files under the input paths. Each split returned contains <i>nearly</i>
+ equal content length. <br>  
+ Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)}
+ to construct <code>RecordReader</code>'s for <code>MultiFileSplit</code>'s.
+ @see MultiFileSplit]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.MultiFileInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.MultiFileSplit -->
+  <class name="MultiFileSplit" extends="org.apache.hadoop.mapred.lib.CombineFileSplit"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MultiFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getLocations" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A sub-collection of input files. Unlike {@link FileSplit}, MultiFileSplit 
+ class does not represent a split of a file, but a split of input files 
+ into smaller sets. The atomic unit of split is a file. <br> 
+ MultiFileSplit can be used to implement {@link RecordReader}'s, with 
+ reading one record per file.
+ @see FileSplit
+ @see MultiFileInputFormat]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.MultiFileSplit -->
+  <!-- start interface org.apache.hadoop.mapred.OutputCollector -->
+  <interface name="OutputCollector"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="collect"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Adds a key/value pair to the output.
+
+ @param key the key to collect.
+ @param value to value to collect.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Collects the <code>&lt;key, value&gt;</code> pairs output by {@link Mapper}s
+ and {@link Reducer}s.
+  
+ <p><code>OutputCollector</code> is the generalization of the facility 
+ provided by the Map-Reduce framework to collect data output by either the 
+ <code>Mapper</code> or the <code>Reducer</code> i.e. intermediate outputs 
+ or the output of the job.</p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.OutputCollector -->
+  <!-- start class org.apache.hadoop.mapred.OutputCommitter -->
+  <class name="OutputCommitter" extends="org.apache.hadoop.mapreduce.OutputCommitter"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="OutputCommitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setupJob"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[For the framework to setup the job output during initialization.  This is
+ called from the application master process for the entire job. This will be
+ called multiple times, once per job attempt.
+ 
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException if temporary output could not be created]]>
+      </doc>
+    </method>
+    <method name="cleanupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #commitJob(JobContext)} or 
+                 {@link #abortJob(JobContext, int)} instead.">
+      <param name="jobContext" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[For cleaning up the job's output after job completion.  This is called
+ from the application master process for the entire job. This may be called
+ multiple times.
+ 
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException
+ @deprecated Use {@link #commitJob(JobContext)} or 
+                 {@link #abortJob(JobContext, int)} instead.]]>
+      </doc>
+    </method>
+    <method name="commitJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[For committing job's output after successful job completion. Note that this
+ is invoked for jobs with final runstate as SUCCESSFUL.  This is called
+ from the application master process for the entire job. This is guaranteed
+ to only be called once.  If it throws an exception the entire job will
+ fail.
+ 
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="abortJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapred.JobContext"/>
+      <param name="status" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[For aborting an unsuccessful job's output. Note that this is invoked for 
+ jobs with final runstate as {@link JobStatus#FAILED} or 
+ {@link JobStatus#KILLED}. This is called from the application
+ master process for the entire job. This may be called multiple times.
+ 
+ @param jobContext Context of the job whose output is being written.
+ @param status final runstate of the job
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setupTask"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Sets up output for the task. This is called from each individual task's
+ process that will output to HDFS, and it is called just for that task. This
+ may be called multiple times for the same task, but for different task
+ attempts.
+ 
+ @param taskContext Context of the task whose output is being written.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="needsTaskCommit" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Check whether task needs a commit.  This is called from each individual
+ task's process that will output to HDFS, and it is called just for that
+ task.
+ 
+ @param taskContext
+ @return true/false
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="commitTask"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[To promote the task's temporary output to final output location.
+ If {@link #needsTaskCommit(TaskAttemptContext)} returns true and this
+ task is the task that the AM determines finished first, this method
+ is called to commit an individual task's output.  This is to mark
+ that tasks output as complete, as {@link #commitJob(JobContext)} will 
+ also be called later on if the entire job finished successfully. This
+ is called from a task's process. This may be called multiple times for the
+ same task, but different task attempts.  It should be very rare for this to
+ be called multiple times and requires odd networking failures to make this
+ happen. In the future the Hadoop framework may eliminate this race.
+ 
+ @param taskContext Context of the task whose output is being written.
+ @throws IOException if commit is not]]>
+      </doc>
+    </method>
+    <method name="abortTask"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Discard the task output. This is called from a task's process to clean 
+ up a single task's output that can not yet been committed. This may be
+ called multiple times for the same task, but for different task attempts.
+ 
+ @param taskContext
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #isRecoverySupported(JobContext)} instead.">
+      <doc>
+      <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old apis and this is
+ a bridge between the two.
+ 
+ @deprecated Use {@link #isRecoverySupported(JobContext)} instead.]]>
+      </doc>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Is task output recovery supported for restarting jobs?
+ 
+ If task output recovery is supported, job restart can be done more
+ efficiently.
+
+ @param jobContext
+          Context of the job whose output is being written.
+ @return <code>true</code> if task output recovery is supported,
+         <code>false</code> otherwise
+ @throws IOException
+ @see #recoverTask(TaskAttemptContext)]]>
+      </doc>
+    </method>
+    <method name="isCommitJobRepeatable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapred.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns true if an in-progress job commit can be retried. If the MR AM is
+ re-run then it will check this value to determine if it can retry an
+ in-progress commit that was started by a previous version.
+ Note that in rare scenarios, the previous AM version might still be running
+ at that time, due to system anomalies. Hence if this method returns true
+ then the retry commit operation should be able to run concurrently with
+ the previous operation.
+
+ If repeatable job commit is supported, job restart can tolerate previous
+ AM failures during job commit.
+
+ By default, it is not supported. Extended classes (like:
+ FileOutputCommitter) should explicitly override it if provide support.
+
+ @param jobContext
+          Context of the job whose output is being written.
+ @return <code>true</code> repeatable job commit is supported,
+         <code>false</code> otherwise
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="isCommitJobRepeatable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="recoverTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapred.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Recover the task output. 
+ 
+ The retry-count for the job will be passed via the 
+ {@link MRConstants#APPLICATION_ATTEMPT_ID} key in  
+ {@link TaskAttemptContext#getConfiguration()} for the 
+ <code>OutputCommitter</code>. This is called from the application master
+ process, but it is called individually for each task.
+ 
+ If an exception is thrown the task will be attempted again. 
+ 
+ @param taskContext Context of the task whose output is being recovered
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old apis and this
+ is a bridge between the two.]]>
+      </doc>
+    </method>
+    <method name="cleanupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="Use {@link #commitJob(org.apache.hadoop.mapreduce.JobContext)}
+             or {@link #abortJob(org.apache.hadoop.mapreduce.JobContext, org.apache.hadoop.mapreduce.JobStatus.State)}
+             instead.">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old apis and this
+ is a bridge between the two.
+ @deprecated Use {@link #commitJob(org.apache.hadoop.mapreduce.JobContext)}
+             or {@link #abortJob(org.apache.hadoop.mapreduce.JobContext, org.apache.hadoop.mapreduce.JobStatus.State)}
+             instead.]]>
+      </doc>
+    </method>
+    <method name="commitJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old apis and this
+ is a bridge between the two.]]>
+      </doc>
+    </method>
+    <method name="abortJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <param name="runState" type="org.apache.hadoop.mapreduce.JobStatus.State"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old apis and this
+ is a bridge between the two.]]>
+      </doc>
+    </method>
+    <method name="setupTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old apis and this
+ is a bridge between the two.]]>
+      </doc>
+    </method>
+    <method name="needsTaskCommit" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old apis and this
+ is a bridge between the two.]]>
+      </doc>
+    </method>
+    <method name="commitTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old apis and this
+ is a bridge between the two.]]>
+      </doc>
+    </method>
+    <method name="abortTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old apis and this
+ is a bridge between the two.]]>
+      </doc>
+    </method>
+    <method name="recoverTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old apis and this
+ is a bridge between the two.]]>
+      </doc>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="true" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This method implements the new interface by calling the old method. Note
+ that the input types are different between the new and old apis and this is
+ a bridge between the two.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<code>OutputCommitter</code> describes the commit of task output for a 
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputCommitter</code> of 
+ the job to:<p>
+ <ol>
+   <li>
+   Setup the job during initialization. For example, create the temporary 
+   output directory for the job during the initialization of the job.
+   </li>
+   <li>
+   Cleanup the job after the job completion. For example, remove the
+   temporary output directory after the job completion. 
+   </li>
+   <li>
+   Setup the task temporary output.
+   </li> 
+   <li>
+   Check whether a task needs a commit. This is to avoid the commit
+   procedure if a task does not need commit.
+   </li>
+   <li>
+   Commit of the task output.
+   </li>  
+   <li>
+   Discard the task commit.
+   </li>
+ </ol>
+ The methods in this class can be called from several different processes and
+ from several different contexts.  It is important to know which process and
+ which context each is called from.  Each method should be marked accordingly
+ in its documentation.  It is also important to note that not all methods are
+ guaranteed to be called once and only once.  If a method is not guaranteed to
+ have this property the output committer needs to handle this appropriately. 
+ Also note it will only be in rare situations where they may be called 
+ multiple times for the same task.
+ 
+ @see FileOutputCommitter 
+ @see JobContext
+ @see TaskAttemptContext]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.OutputCommitter -->
+  <!-- start interface org.apache.hadoop.mapred.OutputFormat -->
+  <interface name="OutputFormat"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the {@link RecordWriter} for the given job.
+
+ @param ignored
+ @param job configuration for the job whose output is being written.
+ @param name the unique name for this part of the output.
+ @param progress mechanism for reporting progress while writing to file.
+ @return a {@link RecordWriter} to write the output for the job.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Check for validity of the output-specification for the job.
+  
+ <p>This is to validate the output specification for the job when it is
+ a job is submitted.  Typically checks that it does not already exist,
+ throwing an exception when it already exists, so that output is not
+ overwritten.</p>
+
+ Implementations which write to filesystems which support delegation
+ tokens usually collect the tokens for the destination path(s)
+ and attach them to the job configuration.
+ @param ignored
+ @param job job configuration.
+ @throws IOException when output should not be attempted]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<code>OutputFormat</code> describes the output-specification for a 
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
+ job to:<p>
+ <ol>
+   <li>
+   Validate the output-specification of the job. For e.g. check that the 
+   output directory doesn't already exist. 
+   <li>
+   Provide the {@link RecordWriter} implementation to be used to write out
+   the output files of the job. Output files are stored in a 
+   {@link FileSystem}.
+   </li>
+ </ol>
+ 
+ @see RecordWriter
+ @see JobConf]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.OutputFormat -->
+  <!-- start class org.apache.hadoop.mapred.OutputLogFilter -->
+  <class name="OutputLogFilter" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.fs.PathFilter"/>
+    <constructor name="OutputLogFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="accept" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <doc>
+    <![CDATA[This class filters log files from directory given
+ It doesnt accept paths having _logs.
+ This can be used to list paths of output directory as follows:
+   Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
+                                   new OutputLogFilter()));]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.OutputLogFilter -->
+  <!-- start interface org.apache.hadoop.mapred.Partitioner -->
+  <interface name="Partitioner"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <method name="getPartition" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K2"/>
+      <param name="value" type="V2"/>
+      <param name="numPartitions" type="int"/>
+      <doc>
+      <![CDATA[Get the paritition number for a given key (hence record) given the total 
+ number of partitions i.e. number of reduce-tasks for the job.
+   
+ <p>Typically a hash function on a all or a subset of the key.</p>
+
+ @param key the key to be paritioned.
+ @param value the entry value.
+ @param numPartitions the total number of partitions.
+ @return the partition number for the <code>key</code>.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Partitions the key space.
+ 
+ <p><code>Partitioner</code> controls the partitioning of the keys of the 
+ intermediate map-outputs. The key (or a subset of the key) is used to derive
+ the partition, typically by a hash function. The total number of partitions
+ is the same as the number of reduce tasks for the job. Hence this controls
+ which of the <code>m</code> reduce tasks the intermediate key (and hence the 
+ record) is sent for reduction.</p>
+
+ <p>Note: A <code>Partitioner</code> is created only when there are multiple
+ reducers.</p>
+ 
+ @see Reducer]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.Partitioner -->
+  <!-- start interface org.apache.hadoop.mapred.RecordReader -->
+  <interface name="RecordReader"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Closeable"/>
+    <method name="next" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Reads the next key/value pair from the input for processing.
+
+ @param key the key to read data into
+ @param value the value to read data into
+ @return true iff a key/value was read, false if at EOF]]>
+      </doc>
+    </method>
+    <method name="createKey" return="K"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an object of the appropriate type to be used as a key.
+ 
+ @return a new key object.]]>
+      </doc>
+    </method>
+    <method name="createValue" return="V"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an object of the appropriate type to be used as a value.
+ 
+ @return a new value object.]]>
+      </doc>
+    </method>
+    <method name="getPos" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the current position in the input.
+ 
+ @return the current position in the input.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close this {@link InputSplit} to future operations.
+ 
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[How much of the input has the {@link RecordReader} consumed i.e.
+ has been processed by?
+ 
+ @return progress from <code>0.0</code> to <code>1.0</code>.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<code>RecordReader</code> reads &lt;key, value&gt; pairs from an 
+ {@link InputSplit}.
+   
+ <p><code>RecordReader</code>, typically, converts the byte-oriented view of 
+ the input, provided by the <code>InputSplit</code>, and presents a 
+ record-oriented view for the {@link Mapper} and {@link Reducer} tasks for
+ processing. It thus assumes the responsibility of processing record 
+ boundaries and presenting the tasks with keys and values.</p>
+ 
+ @see InputSplit
+ @see InputFormat]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.RecordReader -->
+  <!-- start interface org.apache.hadoop.mapred.RecordWriter -->
+  <interface name="RecordWriter"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="write"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes a key/value pair.
+
+ @param key the key to write.
+ @param value the value to write.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close this <code>RecordWriter</code> to future operations.
+ 
+ @param reporter facility to report progress.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<code>RecordWriter</code> writes the output &lt;key, value&gt; pairs 
+ to an output file.
+ 
+ <p><code>RecordWriter</code> implementations write the job outputs to the
+ {@link FileSystem}.
+ 
+ @see OutputFormat]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.RecordWriter -->
+  <!-- start interface org.apache.hadoop.mapred.Reducer -->
+  <interface name="Reducer"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <implements name="org.apache.hadoop.io.Closeable"/>
+    <method name="reduce"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K2"/>
+      <param name="values" type="java.util.Iterator"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<i>Reduces</i> values for a given key.  
+ 
+ <p>The framework calls this method for each 
+ <code>&lt;key, (list of values)&gt;</code> pair in the grouped inputs.
+ Output values must be of the same type as input values.  Input keys must 
+ not be altered. The framework will <b>reuse</b> the key and value objects
+ that are passed into the reduce, therefore the application should clone
+ the objects they want to keep a copy of. In many cases, all values are 
+ combined into zero or one value.
+ </p>
+   
+ <p>Output pairs are collected with calls to  
+ {@link OutputCollector#collect(Object,Object)}.</p>
+
+ <p>Applications can use the {@link Reporter} provided to report progress 
+ or just indicate that they are alive. In scenarios where the application 
+ takes a significant amount of time to process individual key/value 
+ pairs, this is crucial since the framework might assume that the task has 
+ timed-out and kill that task. The other way of avoiding this is to set 
+ <a href="{@docRoot}/../hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml#mapreduce.task.timeout">
+ mapreduce.task.timeout</a> to a high-enough value (or even zero for no 
+ time-outs).</p>
+ 
+ @param key the key.
+ @param values the list of values to reduce.
+ @param output to collect keys and combined values.
+ @param reporter facility to report progress.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Reduces a set of intermediate values which share a key to a smaller set of
+ values.  
+ 
+ <p>The number of <code>Reducer</code>s for the job is set by the user via 
+ {@link JobConf#setNumReduceTasks(int)}. <code>Reducer</code> implementations 
+ can access the {@link JobConf} for the job via the 
+ {@link JobConfigurable#configure(JobConf)} method and initialize themselves. 
+ Similarly they can use the {@link Closeable#close()} method for
+ de-initialization.</p>
+
+ <p><code>Reducer</code> has 3 primary phases:</p>
+ <ol>
+   <li>
+   
+   <b id="Shuffle">Shuffle</b>
+   
+   <p><code>Reducer</code> is input the grouped output of a {@link Mapper}.
+   In the phase the framework, for each <code>Reducer</code>, fetches the 
+   relevant partition of the output of all the <code>Mapper</code>s, via HTTP. 
+   </p>
+   </li>
+   
+   <li>
+   <b id="Sort">Sort</b>
+   
+   <p>The framework groups <code>Reducer</code> inputs by <code>key</code>s 
+   (since different <code>Mapper</code>s may have output the same key) in this
+   stage.</p>
+   
+   <p>The shuffle and sort phases occur simultaneously i.e. while outputs are
+   being fetched they are merged.</p>
+      
+   <b id="SecondarySort">SecondarySort</b>
+   
+   <p>If equivalence rules for keys while grouping the intermediates are 
+   different from those for grouping keys before reduction, then one may 
+   specify a <code>Comparator</code> via 
+   {@link JobConf#setOutputValueGroupingComparator(Class)}.Since 
+   {@link JobConf#setOutputKeyComparatorClass(Class)} can be used to 
+   control how intermediate keys are grouped, these can be used in conjunction 
+   to simulate <i>secondary sort on values</i>.</p>
+   
+   
+   For example, say that you want to find duplicate web pages and tag them 
+   all with the url of the "best" known example. You would set up the job 
+   like:
+   <ul>
+     <li>Map Input Key: url</li>
+     <li>Map Input Value: document</li>
+     <li>Map Output Key: document checksum, url pagerank</li>
+     <li>Map Output Value: url</li>
+     <li>Partitioner: by checksum</li>
+     <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
+     <li>OutputValueGroupingComparator: by checksum</li>
+   </ul>
+   </li>
+   
+   <li>   
+   <b id="Reduce">Reduce</b>
+   
+   <p>In this phase the 
+   {@link #reduce(Object, Iterator, OutputCollector, Reporter)}
+   method is called for each <code>&lt;key, (list of values)&gt;</code> pair in
+   the grouped inputs.</p>
+   <p>The output of the reduce task is typically written to the 
+   {@link FileSystem} via 
+   {@link OutputCollector#collect(Object, Object)}.</p>
+   </li>
+ </ol>
+ 
+ <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
+ 
+ <p>Example:</p>
+ <p><blockquote><pre>
+     public class MyReducer&lt;K extends WritableComparable, V extends Writable&gt; 
+     extends MapReduceBase implements Reducer&lt;K, V, K, V&gt; {
+     
+       static enum MyCounters { NUM_RECORDS }
+        
+       private String reduceTaskId;
+       private int noKeys = 0;
+       
+       public void configure(JobConf job) {
+         reduceTaskId = job.get(JobContext.TASK_ATTEMPT_ID);
+       }
+       
+       public void reduce(K key, Iterator&lt;V&gt; values,
+                          OutputCollector&lt;K, V&gt; output, 
+                          Reporter reporter)
+       throws IOException {
+       
+         // Process
+         int noValues = 0;
+         while (values.hasNext()) {
+           V value = values.next();
+           
+           // Increment the no. of values for this key
+           ++noValues;
+           
+           // Process the &lt;key, value&gt; pair (assume this takes a while)
+           // ...
+           // ...
+           
+           // Let the framework know that we are alive, and kicking!
+           if ((noValues%10) == 0) {
+             reporter.progress();
+           }
+         
+           // Process some more
+           // ...
+           // ...
+           
+           // Output the &lt;key, value&gt; 
+           output.collect(key, value);
+         }
+         
+         // Increment the no. of &lt;key, list of values&gt; pairs processed
+         ++noKeys;
+         
+         // Increment counters
+         reporter.incrCounter(NUM_RECORDS, 1);
+         
+         // Every 100 keys update application-level status
+         if ((noKeys%100) == 0) {
+           reporter.setStatus(reduceTaskId + " processed " + noKeys);
+         }
+       }
+     }
+ </pre></blockquote>
+ 
+ @see Mapper
+ @see Partitioner
+ @see Reporter
+ @see MapReduceBase]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.Reducer -->
+  <!-- start interface org.apache.hadoop.mapred.Reporter -->
+  <interface name="Reporter"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.util.Progressable"/>
+    <method name="setStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="status" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the status description for the task.
+ 
+ @param status brief description of the current status.]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.Enum"/>
+      <doc>
+      <![CDATA[Get the {@link Counter} of the given group with the given name.
+ 
+ @param name counter name
+ @return the <code>Counter</code> of the given group/name.]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the {@link Counter} of the given group with the given name.
+ 
+ @param group counter group
+ @param name counter name
+ @return the <code>Counter</code> of the given group/name.]]>
+      </doc>
+    </method>
+    <method name="incrCounter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Enum"/>
+      <param name="amount" type="long"/>
+      <doc>
+      <![CDATA[Increments the counter identified by the key, which can be of
+ any {@link Enum} type, by the specified amount.
+ 
+ @param key key to identify the counter to be incremented. The key can
+            be any <code>Enum</code>. 
+ @param amount A non-negative amount by which the counter is to 
+               be incremented.]]>
+      </doc>
+    </method>
+    <method name="incrCounter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="group" type="java.lang.String"/>
+      <param name="counter" type="java.lang.String"/>
+      <param name="amount" type="long"/>
+      <doc>
+      <![CDATA[Increments the counter identified by the group and counter name
+ by the specified amount.
+ 
+ @param group name to identify the group of the counter to be incremented.
+ @param counter name to identify the counter within the group.
+ @param amount A non-negative amount by which the counter is to 
+               be incremented.]]>
+      </doc>
+    </method>
+    <method name="getInputSplit" return="org.apache.hadoop.mapred.InputSplit"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/>
+      <doc>
+      <![CDATA[Get the {@link InputSplit} object for a map.
+ 
+ @return the <code>InputSplit</code> that the map is reading from.
+ @throws UnsupportedOperationException if called outside a mapper]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the progress of the task. Progress is represented as a number between
+ 0 and 1 (inclusive).]]>
+      </doc>
+    </method>
+    <field name="NULL" type="org.apache.hadoop.mapred.Reporter"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A constant of Reporter type that does nothing.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A facility for Map-Reduce applications to report progress and update 
+ counters, status information etc.
+ 
+ <p>{@link Mapper} and {@link Reducer} can use the <code>Reporter</code>
+ provided to report progress or just indicate that they are alive. In 
+ scenarios where the application takes significant amount of time to
+ process individual key/value pairs, this is crucial since the framework 
+ might assume that the task has timed-out and kill that task.
+
+ <p>Applications can also update {@link Counters} via the provided 
+ <code>Reporter</code> .</p>
+ 
+ @see Progressable
+ @see Counters]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.Reporter -->
+  <!-- start interface org.apache.hadoop.mapred.RunningJob -->
+  <interface name="RunningJob"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getConfiguration" return="org.apache.hadoop.conf.Configuration"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the underlying job configuration
+
+ @return the configuration of the job.]]>
+      </doc>
+    </method>
+    <method name="getID" return="org.apache.hadoop.mapred.JobID"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the job identifier.
+ 
+ @return the job identifier.]]>
+      </doc>
+    </method>
+    <method name="getJobID" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="This method is deprecated and will be removed. Applications should 
+ rather use {@link #getID()}.">
+      <doc>
+      <![CDATA[@deprecated This method is deprecated and will be removed. Applications should 
+ rather use {@link #getID()}.]]>
+      </doc>
+    </method>
+    <method name="getJobName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the name of the job.
+ 
+ @return the name of the job.]]>
+      </doc>
+    </method>
+    <method name="getJobFile" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the path of the submitted job configuration.
+ 
+ @return the path of the submitted job configuration.]]>
+      </doc>
+    </method>
+    <method name="getTrackingURL" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the URL where some job progress information will be displayed.
+ 
+ @return the URL where some job progress information will be displayed.]]>
+      </doc>
+    </method>
+    <method name="mapProgress" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the <i>progress</i> of the job's map-tasks, as a float between 0.0 
+ and 1.0.  When all map tasks have completed, the function returns 1.0.
+ 
+ @return the progress of the job's map-tasks.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="reduceProgress" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0 
+ and 1.0.  When all reduce tasks have completed, the function returns 1.0.
+ 
+ @return the progress of the job's reduce-tasks.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="cleanupProgress" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the <i>progress</i> of the job's cleanup-tasks, as a float between 0.0 
+ and 1.0.  When all cleanup tasks have completed, the function returns 1.0.
+ 
+ @return the progress of the job's cleanup-tasks.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setupProgress" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the <i>progress</i> of the job's setup-tasks, as a float between 0.0 
+ and 1.0.  When all setup tasks have completed, the function returns 1.0.
+ 
+ @return the progress of the job's setup-tasks.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="isComplete" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Check if the job is finished or not. 
+ This is a non-blocking call.
+ 
+ @return <code>true</code> if the job is complete, else <code>false</code>.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="isSuccessful" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Check if the job completed successfully. 
+ 
+ @return <code>true</code> if the job succeeded, else <code>false</code>.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="waitForCompletion"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Blocks until the job is complete.
+ 
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getJobState" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the current state of the Job.
+ {@link JobStatus}
+ 
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns a snapshot of the current status, {@link JobStatus}, of the Job.
+ Need to call again for latest information.
+ 
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="killJob"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Kill the running job. Blocks until all job tasks have been killed as well.
+ If the job is no longer running, it simply returns.
+ 
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setJobPriority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the priority of a running job.
+ @param priority the new priority for the job.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="startFrom" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get events indicating completion (success/failure) of component tasks.
+  
+ @param startFrom index to start fetching events from
+ @return an array of {@link TaskCompletionEvent}s
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="killTask"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+      <param name="shouldFail" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Kill indicated task attempt.
+ 
+ @param taskId the id of the task to be terminated.
+ @param shouldFail if true the task is failed and added to failed tasks 
+                   list, otherwise it is just killed, w/o affecting 
+                   job failure status.  
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="killTask"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Applications should rather use {@link #killTask(TaskAttemptID, boolean)}">
+      <param name="taskId" type="java.lang.String"/>
+      <param name="shouldFail" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated Applications should rather use {@link #killTask(TaskAttemptID, boolean)}]]>
+      </doc>
+    </method>
+    <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets the counters for this job.
+ 
+ @return the counters for this job or null if the job has been retired.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getTaskDiagnostics" return="java.lang.String[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskid" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets the diagnostic messages for a given task attempt.
+ @param taskid
+ @return the list of diagnostic messages for the task
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getHistoryUrl" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the url where history file is archived. Returns empty string if 
+ history file is not available yet. 
+ 
+ @return the url where history file is archived
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="isRetired" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Check whether the job has been removed from JobTracker memory and retired.
+ On retire, the job history file is copied to a location known by 
+ {@link #getHistoryUrl()}
+ @return <code>true</code> if the job retired, else <code>false</code>.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getFailureInfo" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get failure info for the job.
+ @return the failure info for the job.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<code>RunningJob</code> is the user-interface to query for details on a 
+ running Map-Reduce job.
+ 
+ <p>Clients can get hold of <code>RunningJob</code> via the {@link JobClient}
+ and then query the running-job for details such as name, configuration, 
+ progress etc.</p> 
+ 
+ @see JobClient]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.RunningJob -->
+  <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+  <class name="SequenceFileAsBinaryInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileAsBinaryInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[InputFormat reading keys, values from SequenceFiles in binary (raw)
+ format.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
+  <class name="SequenceFileAsBinaryOutputFormat" extends="org.apache.hadoop.mapred.SequenceFileOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileAsBinaryOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setSequenceFileOutputKeyClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the key class for the {@link SequenceFile}
+ <p>This allows the user to specify the key class to be different 
+ from the actual class ({@link BytesWritable}) used for writing </p>
+ 
+ @param conf the {@link JobConf} to modify
+ @param theClass the SequenceFile output key class.]]>
+      </doc>
+    </method>
+    <method name="setSequenceFileOutputValueClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the value class for the {@link SequenceFile}
+ <p>This allows the user to specify the value class to be different 
+ from the actual class ({@link BytesWritable}) used for writing </p>
+ 
+ @param conf the {@link JobConf} to modify
+ @param theClass the SequenceFile output value class.]]>
+      </doc>
+    </method>
+    <method name="getSequenceFileOutputKeyClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the key class for the {@link SequenceFile}
+ 
+ @return the key class of the {@link SequenceFile}]]>
+      </doc>
+    </method>
+    <method name="getSequenceFileOutputValueClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the value class for the {@link SequenceFile}
+ 
+ @return the value class of the {@link SequenceFile}]]>
+      </doc>
+    </method>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[An {@link OutputFormat} that writes keys, values to 
+ {@link SequenceFile}s in binary (raw) format]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryOutputFormat -->
+  <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
+  <class name="SequenceFileAsTextInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileAsTextInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[This class is similar to SequenceFileInputFormat, 
+ except it generates SequenceFileAsTextRecordReader 
+ which converts the input keys and values to their 
+ String forms by calling toString() method.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+  <class name="SequenceFileAsTextRecordReader" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.RecordReader"/>
+    <constructor name="SequenceFileAsTextRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="createKey" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createValue" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.io.Text"/>
+      <param name="value" type="org.apache.hadoop.io.Text"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read key/value pair in a line.]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getPos" return="long"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[This class converts the input keys and values to their String forms by calling the
+ toString() method. This class is to SequenceFileAsTextInputFormat
+ what LineRecordReader is to TextInputFormat.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader -->
+  <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+  <class name="SequenceFileInputFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileInputFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a record reader for the given split
+ @param split file split
+ @param job job configuration
+ @param reporter reporter who sends report to task tracker
+ @return RecordReader]]>
+      </doc>
+    </method>
+    <method name="setFilterClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="filterClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[set the filter class
+ 
+ @param conf application configuration
+ @param filterClass filter class]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A class that allows a map/red job to work on a sample of sequence files.
+ The sample is decided by the filter class set by the job.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter -->
+  <!-- start class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+  <class name="SequenceFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[An {@link InputFormat} for {@link SequenceFile}s.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.SequenceFileInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
+  <class name="SequenceFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getReaders" return="org.apache.hadoop.io.SequenceFile.Reader[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="dir" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Open the output generated by this format.]]>
+      </doc>
+    </method>
+    <method name="getOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf}
+ @return the {@link CompressionType} for the output {@link SequenceFile}, 
+         defaulting to {@link CompressionType#RECORD}]]>
+      </doc>
+    </method>
+    <method name="setOutputCompressionType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <doc>
+      <![CDATA[Set the {@link CompressionType} for the output {@link SequenceFile}.
+ @param conf the {@link JobConf} to modify
+ @param style the {@link CompressionType} for the output
+              {@link SequenceFile}]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[An {@link OutputFormat} that writes {@link SequenceFile}s.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.SequenceFileOutputFormat -->
+  <!-- start class org.apache.hadoop.mapred.SequenceFileRecordReader -->
+  <class name="SequenceFileRecordReader" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.RecordReader"/>
+    <constructor name="SequenceFileRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getKeyClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The class of key that must be passed to {@link
+ #next(Object, Object)}..]]>
+      </doc>
+    </method>
+    <method name="getValueClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The class of value that must be passed to {@link
+ #next(Object, Object)}..]]>
+      </doc>
+    </method>
+    <method name="createKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createValue" return="V"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getCurrentValue"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="value" type="V"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+      </doc>
+    </method>
+    <method name="getPos" return="long"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="seek"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="pos" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="conf" type="org.apache.hadoop.conf.Configuration"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An {@link RecordReader} for {@link SequenceFile}s.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.SequenceFileRecordReader -->
+  <!-- start class org.apache.hadoop.mapred.SkipBadRecords -->
+  <class name="SkipBadRecords" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SkipBadRecords"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAttemptsToStartSkipping" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the number of Task attempts AFTER which skip mode 
+ will be kicked off. When skip mode is kicked off, the 
+ tasks reports the range of records which it will process 
+ next to the TaskTracker. So that on failures, TT knows which 
+ ones are possibly the bad records. On further executions, 
+ those are skipped.
+ Default value is 2.
+ 
+ @param conf the configuration
+ @return attemptsToStartSkipping no of task attempts]]>
+      </doc>
+    </method>
+    <method name="setAttemptsToStartSkipping"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="attemptsToStartSkipping" type="int"/>
+      <doc>
+      <![CDATA[Set the number of Task attempts AFTER which skip mode 
+ will be kicked off. When skip mode is kicked off, the 
+ tasks reports the range of records which it will process 
+ next to the TaskTracker. So that on failures, TT knows which 
+ ones are possibly the bad records. On further executions, 
+ those are skipped.
+ Default value is 2.
+ 
+ @param conf the configuration
+ @param attemptsToStartSkipping no of task attempts]]>
+      </doc>
+    </method>
+    <method name="getAutoIncrMapperProcCount" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the flag which if set to true, 
+ {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} is incremented 
+ by MapRunner after invoking the map function. This value must be set to 
+ false for applications which process the records asynchronously 
+ or buffer the input records. For example streaming. 
+ In such cases applications should increment this counter on their own.
+ Default value is true.
+ 
+ @param conf the configuration
+ @return <code>true</code> if auto increment 
+                       {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}.
+         <code>false</code> otherwise.]]>
+      </doc>
+    </method>
+    <method name="setAutoIncrMapperProcCount"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="autoIncr" type="boolean"/>
+      <doc>
+      <![CDATA[Set the flag which if set to true, 
+ {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS} is incremented 
+ by MapRunner after invoking the map function. This value must be set to 
+ false for applications which process the records asynchronously 
+ or buffer the input records. For example streaming. 
+ In such cases applications should increment this counter on their own.
+ Default value is true.
+ 
+ @param conf the configuration
+ @param autoIncr whether to auto increment 
+        {@link SkipBadRecords#COUNTER_MAP_PROCESSED_RECORDS}.]]>
+      </doc>
+    </method>
+    <method name="getAutoIncrReducerProcCount" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the flag which if set to true, 
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS} is incremented 
+ by framework after invoking the reduce function. This value must be set to 
+ false for applications which process the records asynchronously 
+ or buffer the input records. For example streaming. 
+ In such cases applications should increment this counter on their own.
+ Default value is true.
+ 
+ @param conf the configuration
+ @return <code>true</code> if auto increment 
+                    {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}.
+         <code>false</code> otherwise.]]>
+      </doc>
+    </method>
+    <method name="setAutoIncrReducerProcCount"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="autoIncr" type="boolean"/>
+      <doc>
+      <![CDATA[Set the flag which if set to true, 
+ {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS} is incremented 
+ by framework after invoking the reduce function. This value must be set to 
+ false for applications which process the records asynchronously 
+ or buffer the input records. For example streaming. 
+ In such cases applications should increment this counter on their own.
+ Default value is true.
+ 
+ @param conf the configuration
+ @param autoIncr whether to auto increment 
+        {@link SkipBadRecords#COUNTER_REDUCE_PROCESSED_GROUPS}.]]>
+      </doc>
+    </method>
+    <method name="getSkipOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the directory to which skipped records are written. By default it is 
+ the sub directory of the output _logs directory.
+ User can stop writing skipped records by setting the value null.
+ 
+ @param conf the configuration.
+ @return path skip output directory. Null is returned if this is not set 
+ and output directory is also not set.]]>
+      </doc>
+    </method>
+    <method name="setSkipOutputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Set the directory to which skipped records are written. By default it is 
+ the sub directory of the output _logs directory.
+ User can stop writing skipped records by setting the value null.
+ 
+ @param conf the configuration.
+ @param path skip output directory path]]>
+      </doc>
+    </method>
+    <method name="getMapperMaxSkipRecords" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the number of acceptable skip records surrounding the bad record PER 
+ bad record in mapper. The number includes the bad record as well.
+ To turn the feature of detection/skipping of bad records off, set the 
+ value to 0.
+ The framework tries to narrow down the skipped range by retrying  
+ until this threshold is met OR all attempts get exhausted for this task. 
+ Set the value to Long.MAX_VALUE to indicate that framework need not try to 
+ narrow down. Whatever records(depends on application) get skipped are 
+ acceptable.
+ Default value is 0.
+ 
+ @param conf the configuration
+ @return maxSkipRecs acceptable skip records.]]>
+      </doc>
+    </method>
+    <method name="setMapperMaxSkipRecords"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="maxSkipRecs" type="long"/>
+      <doc>
+      <![CDATA[Set the number of acceptable skip records surrounding the bad record PER 
+ bad record in mapper. The number includes the bad record as well.
+ To turn the feature of detection/skipping of bad records off, set the 
+ value to 0.
+ The framework tries to narrow down the skipped range by retrying  
+ until this threshold is met OR all attempts get exhausted for this task. 
+ Set the value to Long.MAX_VALUE to indicate that framework need not try to 
+ narrow down. Whatever records(depends on application) get skipped are 
+ acceptable.
+ Default value is 0.
+ 
+ @param conf the configuration
+ @param maxSkipRecs acceptable skip records.]]>
+      </doc>
+    </method>
+    <method name="getReducerMaxSkipGroups" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the number of acceptable skip groups surrounding the bad group PER 
+ bad group in reducer. The number includes the bad group as well.
+ To turn the feature of detection/skipping of bad groups off, set the 
+ value to 0.
+ The framework tries to narrow down the skipped range by retrying  
+ until this threshold is met OR all attempts get exhausted for this task. 
+ Set the value to Long.MAX_VALUE to indicate that framework need not try to 
+ narrow down. Whatever groups(depends on application) get skipped are 
+ acceptable.
+ Default value is 0.
+ 
+ @param conf the configuration
+ @return maxSkipGrps acceptable skip groups.]]>
+      </doc>
+    </method>
+    <method name="setReducerMaxSkipGroups"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="maxSkipGrps" type="long"/>
+      <doc>
+      <![CDATA[Set the number of acceptable skip groups surrounding the bad group PER 
+ bad group in reducer. The number includes the bad group as well.
+ To turn the feature of detection/skipping of bad groups off, set the 
+ value to 0.
+ The framework tries to narrow down the skipped range by retrying  
+ until this threshold is met OR all attempts get exhausted for this task. 
+ Set the value to Long.MAX_VALUE to indicate that framework need not try to 
+ narrow down. Whatever groups(depends on application) get skipped are 
+ acceptable.
+ Default value is 0.
+ 
+ @param conf the configuration
+ @param maxSkipGrps acceptable skip groups.]]>
+      </doc>
+    </method>
+    <field name="COUNTER_GROUP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Special counters which are written by the application and are 
+ used by the framework for detecting bad records. For detecting bad records 
+ these counters must be incremented by the application.]]>
+      </doc>
+    </field>
+    <field name="COUNTER_MAP_PROCESSED_RECORDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of processed map records.
+ @see SkipBadRecords#getAutoIncrMapperProcCount(Configuration)]]>
+      </doc>
+    </field>
+    <field name="COUNTER_REDUCE_PROCESSED_GROUPS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of processed reduce groups.
+ @see SkipBadRecords#getAutoIncrReducerProcCount(Configuration)]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Utility class for skip bad records functionality. It contains various 
+ settings related to skipping of bad records.
+ 
+ <p>Hadoop provides an optional mode of execution in which the bad records
+ are detected and skipped in further attempts.
+ 
+ <p>This feature can be used when map/reduce tasks crashes deterministically on 
+ certain input. This happens due to bugs in the map/reduce function. The usual
+ course would be to fix these bugs. But sometimes this is not possible; 
+ perhaps the bug is in third party libraries for which the source code is 
+ not available. Due to this, the task never reaches to completion even with 
+ multiple attempts and complete data for that task is lost.</p>
+  
+ <p>With this feature, only a small portion of data is lost surrounding 
+ the bad record, which may be acceptable for some user applications.
+ see {@link SkipBadRecords#setMapperMaxSkipRecords(Configuration, long)}</p>
+ 
+ <p>The skipping mode gets kicked off after certain no of failures 
+ see {@link SkipBadRecords#setAttemptsToStartSkipping(Configuration, int)}</p>
+  
+ <p>In the skipping mode, the map/reduce task maintains the record range which 
+ is getting processed at all times. Before giving the input to the
+ map/reduce function, it sends this record range to the Task tracker.
+ If task crashes, the Task tracker knows which one was the last reported
+ range. On further attempts that range get skipped.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.SkipBadRecords -->
+  <!-- start class org.apache.hadoop.mapred.SplitLocationInfo -->
+  <class name="SplitLocationInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SplitLocationInfo" type="java.lang.String, boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="isOnDisk" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isInMemory" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLocation" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.SplitLocationInfo -->
+  <!-- start interface org.apache.hadoop.mapred.TaskAttemptContext -->
+  <interface name="TaskAttemptContext"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+    <method name="getTaskAttemptID" return="org.apache.hadoop.mapred.TaskAttemptID"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProgressible" return="org.apache.hadoop.util.Progressable"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.TaskAttemptContext -->
+  <!-- start class org.apache.hadoop.mapred.TaskAttemptID -->
+  <class name="TaskAttemptID" extends="org.apache.hadoop.mapreduce.TaskAttemptID"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TaskAttemptID" type="org.apache.hadoop.mapred.TaskID, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a TaskAttemptID object from given {@link TaskID}.  
+ @param taskId TaskID that this task belongs to  
+ @param id the task attempt number]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskAttemptID" type="java.lang.String, int, boolean, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #TaskAttemptID(String, int, TaskType, int, int)}.">
+      <doc>
+      <![CDATA[Constructs a TaskId object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number 
+ @param isMap whether the tip is a map 
+ @param taskId taskId number
+ @param id the task attempt number
+ @deprecated Use {@link #TaskAttemptID(String, int, TaskType, int, int)}.]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskAttemptID" type="java.lang.String, int, org.apache.hadoop.mapreduce.TaskType, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a TaskId object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number 
+ @param type the TaskType 
+ @param taskId taskId number
+ @param id the task attempt number]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskAttemptID"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="downgrade" return="org.apache.hadoop.mapred.TaskAttemptID"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="old" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
+      <doc>
+      <![CDATA[Downgrade a new TaskAttemptID to an old one
+ @param old the new id
+ @return either old or a new TaskAttemptID constructed to match old]]>
+      </doc>
+    </method>
+    <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="read" return="org.apache.hadoop.mapred.TaskAttemptID"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="forName" return="org.apache.hadoop.mapred.TaskAttemptID"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="str" type="java.lang.String"/>
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <doc>
+      <![CDATA[Construct a TaskAttemptID object from given string 
+ @return constructed TaskAttemptID object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+      </doc>
+    </method>
+    <method name="getTaskAttemptIDsPattern" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jtIdentifier" type="java.lang.String"/>
+      <param name="jobId" type="java.lang.Integer"/>
+      <param name="isMap" type="java.lang.Boolean"/>
+      <param name="taskId" type="java.lang.Integer"/>
+      <param name="attemptId" type="java.lang.Integer"/>
+      <doc>
+      <![CDATA[Returns a regex pattern which matches task attempt IDs. Arguments can 
+ be given null, in which case that part of the regex will be generic.  
+ For example to obtain a regex matching <i>all task attempt IDs</i> 
+ of <i>any jobtracker</i>, in <i>any job</i>, of the <i>first 
+ map task</i>, we would use :
+ <pre> 
+ TaskAttemptID.getTaskAttemptIDsPattern(null, null, true, 1, null);
+ </pre>
+ which will return :
+ <pre> "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" </pre> 
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null 
+ @param taskId taskId number, or null
+ @param attemptId the task attempt number, or null
+ @return a regex pattern matching TaskAttemptIDs]]>
+      </doc>
+    </method>
+    <method name="getTaskAttemptIDsPattern" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jtIdentifier" type="java.lang.String"/>
+      <param name="jobId" type="java.lang.Integer"/>
+      <param name="type" type="org.apache.hadoop.mapreduce.TaskType"/>
+      <param name="taskId" type="java.lang.Integer"/>
+      <param name="attemptId" type="java.lang.Integer"/>
+      <doc>
+      <![CDATA[Returns a regex pattern which matches task attempt IDs. Arguments can 
+ be given null, in which case that part of the regex will be generic.  
+ For example to obtain a regex matching <i>all task attempt IDs</i> 
+ of <i>any jobtracker</i>, in <i>any job</i>, of the <i>first 
+ map task</i>, we would use :
+ <pre> 
+ TaskAttemptID.getTaskAttemptIDsPattern(null, null, TaskType.MAP, 1, null);
+ </pre>
+ which will return :
+ <pre> "attempt_[^_]*_[0-9]*_m_000001_[0-9]*" </pre> 
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param type the {@link TaskType} 
+ @param taskId taskId number, or null
+ @param attemptId the task attempt number, or null
+ @return a regex pattern matching TaskAttemptIDs]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[TaskAttemptID represents the immutable and unique identifier for 
+ a task attempt. Each task attempt is one particular instance of a Map or
+ Reduce Task identified by its TaskID. 
+ 
+ TaskAttemptID consists of 2 parts. First part is the 
+ {@link TaskID}, that this TaskAttemptID belongs to.
+ Second part is the task attempt number. <br> 
+ An example TaskAttemptID is : 
+ <code>attempt_200707121733_0003_m_000005_0</code> , which represents the
+ zeroth task attempt for the fifth map task in the third job 
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskAttemptID strings
+ , but rather use appropriate constructors or {@link #forName(String)} 
+ method. 
+ 
+ @see JobID
+ @see TaskID]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.TaskAttemptID -->
+  <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent -->
+  <class name="TaskCompletionEvent" extends="org.apache.hadoop.mapreduce.TaskCompletionEvent"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TaskCompletionEvent"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor for Writable.]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskCompletionEvent" type="int, org.apache.hadoop.mapred.TaskAttemptID, int, boolean, org.apache.hadoop.mapred.TaskCompletionEvent.Status, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor. eventId should be created externally and incremented
+ per event for each job. 
+ @param eventId event id, event id should be unique and assigned in
+  incrementally, starting from 0. 
+ @param taskId task id
+ @param status task's status 
+ @param taskTrackerHttp task tracker's host:port for http.]]>
+      </doc>
+    </constructor>
+    <method name="getTaskId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #getTaskAttemptId()} instead.">
+      <doc>
+      <![CDATA[Returns task id. 
+ @return task id
+ @deprecated use {@link #getTaskAttemptId()} instead.]]>
+      </doc>
+    </method>
+    <method name="getTaskAttemptId" return="org.apache.hadoop.mapred.TaskAttemptID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns task id. 
+ @return task id]]>
+      </doc>
+    </method>
+    <method name="getTaskStatus" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns {@link Status}
+ @return task completion status]]>
+      </doc>
+    </method>
+    <method name="setTaskId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #setTaskAttemptId(TaskAttemptID)} instead.">
+      <param name="taskId" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets task id. 
+ @param taskId
+ @deprecated use {@link #setTaskAttemptId(TaskAttemptID)} instead.]]>
+      </doc>
+    </method>
+    <method name="setTaskID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use {@link #setTaskAttemptId(TaskAttemptID)} instead.">
+      <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+      <doc>
+      <![CDATA[Sets task id.
+ @param taskId
+ @deprecated use {@link #setTaskAttemptId(TaskAttemptID)} instead.]]>
+      </doc>
+    </method>
+    <method name="setTaskAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="taskId" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+      <doc>
+      <![CDATA[Sets task id. 
+ @param taskId]]>
+      </doc>
+    </method>
+    <field name="EMPTY_ARRAY" type="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This is used to track task completion events on 
+ job tracker.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent -->
+  <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+  <class name="TaskCompletionEvent.Status" extends="java.lang.Enum"
+    abstract="false"
+    static="true" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent.Status -->
+  <!-- start class org.apache.hadoop.mapred.TaskID -->
+  <class name="TaskID" extends="org.apache.hadoop.mapreduce.TaskID"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TaskID" type="org.apache.hadoop.mapreduce.JobID, boolean, int"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #TaskID(String, int, TaskType, int)}">
+      <doc>
+      <![CDATA[Constructs a TaskID object from given {@link JobID}.  
+ @param jobId JobID that this tip belongs to 
+ @param isMap whether the tip is a map 
+ @param id the tip number
+ @deprecated Use {@link #TaskID(String, int, TaskType, int)}]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskID" type="java.lang.String, int, boolean, int"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #TaskID(org.apache.hadoop.mapreduce.JobID, TaskType,
+ int)}">
+      <doc>
+      <![CDATA[Constructs a TaskInProgressId object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number 
+ @param isMap whether the tip is a map 
+ @param id the tip number
+ @deprecated Use {@link #TaskID(org.apache.hadoop.mapreduce.JobID, TaskType,
+ int)}]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskID" type="org.apache.hadoop.mapreduce.JobID, org.apache.hadoop.mapreduce.TaskType, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a TaskID object from given {@link JobID}.  
+ @param jobId JobID that this tip belongs to 
+ @param type the {@link TaskType} 
+ @param id the tip number]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskID" type="java.lang.String, int, org.apache.hadoop.mapreduce.TaskType, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a TaskInProgressId object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number 
+ @param type the {@link TaskType} 
+ @param id the tip number]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskID"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="downgrade" return="org.apache.hadoop.mapred.TaskID"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="old" type="org.apache.hadoop.mapreduce.TaskID"/>
+      <doc>
+      <![CDATA[Downgrade a new TaskID to an old one
+ @param old a new or old TaskID
+ @return either old or a new TaskID build to match old]]>
+      </doc>
+    </method>
+    <method name="read" return="org.apache.hadoop.mapred.TaskID"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getJobID" return="org.apache.hadoop.mapred.JobID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTaskIDsPattern" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link TaskID#getTaskIDsPattern(String, Integer, TaskType,
+ Integer)}">
+      <param name="jtIdentifier" type="java.lang.String"/>
+      <param name="jobId" type="java.lang.Integer"/>
+      <param name="isMap" type="java.lang.Boolean"/>
+      <param name="taskId" type="java.lang.Integer"/>
+      <doc>
+      <![CDATA[Returns a regex pattern which matches task IDs. Arguments can 
+ be given null, in which case that part of the regex will be generic.  
+ For example to obtain a regex matching <i>the first map task</i> 
+ of <i>any jobtracker</i>, of <i>any job</i>, we would use :
+ <pre> 
+ TaskID.getTaskIDsPattern(null, null, true, 1);
+ </pre>
+ which will return :
+ <pre> "task_[^_]*_[0-9]*_m_000001*" </pre> 
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param isMap whether the tip is a map, or null 
+ @param taskId taskId number, or null
+ @return a regex pattern matching TaskIDs
+ @deprecated Use {@link TaskID#getTaskIDsPattern(String, Integer, TaskType,
+ Integer)}]]>
+      </doc>
+    </method>
+    <method name="getTaskIDsPattern" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jtIdentifier" type="java.lang.String"/>
+      <param name="jobId" type="java.lang.Integer"/>
+      <param name="type" type="org.apache.hadoop.mapreduce.TaskType"/>
+      <param name="taskId" type="java.lang.Integer"/>
+      <doc>
+      <![CDATA[Returns a regex pattern which matches task IDs. Arguments can 
+ be given null, in which case that part of the regex will be generic.  
+ For example to obtain a regex matching <i>the first map task</i> 
+ of <i>any jobtracker</i>, of <i>any job</i>, we would use :
+ <pre> 
+ TaskID.getTaskIDsPattern(null, null, true, 1);
+ </pre>
+ which will return :
+ <pre> "task_[^_]*_[0-9]*_m_000001*" </pre> 
+ @param jtIdentifier jobTracker identifier, or null
+ @param jobId job number, or null
+ @param type the {@link TaskType}, or null 
+ @param taskId taskId number, or null
+ @return a regex pattern matching TaskIDs]]>
+      </doc>
+    </method>
+    <method name="forName" return="org.apache.hadoop.mapred.TaskID"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="str" type="java.lang.String"/>
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+    </method>
+    <doc>
+    <![CDATA[TaskID represents the immutable and unique identifier for 
+ a Map or Reduce Task. Each TaskID encompasses multiple attempts made to
+ execute the Map or Reduce Task, each of which are uniquely indentified by
+ their TaskAttemptID.
+ 
+ TaskID consists of 3 parts. First part is the {@link JobID}, that this 
+ TaskInProgress belongs to. Second part of the TaskID is either 'm' or 'r' 
+ representing whether the task is a map task or a reduce task. 
+ And the third part is the task number. <br> 
+ An example TaskID is : 
+ <code>task_200707121733_0003_m_000005</code> , which represents the
+ fifth map task in the third job running at the jobtracker 
+ started at <code>200707121733</code>. 
+ <p>
+ Applications should never construct or parse TaskID strings
+ , but rather use appropriate constructors or {@link #forName(String)} 
+ method. 
+ 
+ @see JobID
+ @see TaskAttemptID]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.TaskID -->
+  <!-- start class org.apache.hadoop.mapred.TaskReport -->
+  <class name="TaskReport" extends="org.apache.hadoop.mapreduce.TaskReport"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TaskReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTaskId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The string of the task id.]]>
+      </doc>
+    </method>
+    <method name="getTaskID" return="org.apache.hadoop.mapred.TaskID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The id of the task.]]>
+      </doc>
+    </method>
+    <method name="getCounters" return="org.apache.hadoop.mapred.Counters"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setSuccessfulAttempt"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="t" type="org.apache.hadoop.mapred.TaskAttemptID"/>
+      <doc>
+      <![CDATA[set successful attempt ID of the task.]]>
+      </doc>
+    </method>
+    <method name="getSuccessfulTaskAttempt" return="org.apache.hadoop.mapred.TaskAttemptID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the attempt ID that took this task to completion]]>
+      </doc>
+    </method>
+    <method name="setRunningTaskAttempts"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="runningAttempts" type="java.util.Collection"/>
+      <doc>
+      <![CDATA[set running attempt(s) of the task.]]>
+      </doc>
+    </method>
+    <method name="getRunningTaskAttempts" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the running task attempt IDs for this task]]>
+      </doc>
+    </method>
+    <method name="setFinishTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="finishTime" type="long"/>
+      <doc>
+      <![CDATA[set finish time of task. 
+ @param finishTime finish time of task.]]>
+      </doc>
+    </method>
+    <method name="setStartTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="startTime" type="long"/>
+      <doc>
+      <![CDATA[set start time of the task.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A report on the state of a task.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.TaskReport -->
+  <!-- start class org.apache.hadoop.mapred.TextInputFormat -->
+  <class name="TextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <constructor name="TextInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[An {@link InputFormat} for plain text files.  Files are broken into lines.
+ Either linefeed or carriage-return are used to signal end of line.  Keys are
+ the position in the file, and values are the line of text..]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.TextInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.TextOutputFormat -->
+  <class name="TextOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TextOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[An {@link OutputFormat} that writes plain text files.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.TextOutputFormat -->
+  <!-- start class org.apache.hadoop.mapred.Utils -->
+  <class name="Utils" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Utils"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[A utility class. It provides
+   A path filter utility to filter out output/part files in the output dir]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.Utils -->
+</package>
+<package name="org.apache.hadoop.mapred.jobcontrol">
+  <!-- start class org.apache.hadoop.mapred.jobcontrol.Job -->
+  <class name="Job" extends="org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Job" type="org.apache.hadoop.mapred.JobConf, java.util.ArrayList"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct a job.
+ @param jobConf a mapred job configuration representing a job to be executed.
+ @param dependingJobs an array of jobs the current job depends on]]>
+      </doc>
+    </constructor>
+    <constructor name="Job" type="org.apache.hadoop.mapred.JobConf"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getAssignedJobID" return="org.apache.hadoop.mapred.JobID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the mapred ID of this job as assigned by the mapred framework.]]>
+      </doc>
+    </method>
+    <method name="setAssignedJobID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="setAssignedJobID should not be called.
+ JOBID is set by the framework.">
+      <param name="mapredJobID" type="org.apache.hadoop.mapred.JobID"/>
+      <doc>
+      <![CDATA[@deprecated setAssignedJobID should not be called.
+ JOBID is set by the framework.]]>
+      </doc>
+    </method>
+    <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the mapred job conf of this job]]>
+      </doc>
+    </method>
+    <method name="setJobConf"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Set the mapred job conf for this job.
+ @param jobConf the mapred job conf for this job.]]>
+      </doc>
+    </method>
+    <method name="getState" return="int"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the state of this job]]>
+      </doc>
+    </method>
+    <method name="setState"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="state" type="int"/>
+      <doc>
+      <![CDATA[This is a no-op function, Its a behavior change from 1.x We no more can
+ change the state from job
+ 
+ @param state
+          the new state for this job.]]>
+      </doc>
+    </method>
+    <method name="addDependingJob" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dependingJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+      <doc>
+      <![CDATA[Add a job to this jobs' dependency list. 
+ Dependent jobs can only be added while a Job 
+ is waiting to run, not during or afterwards.
+ 
+ @param dependingJob Job that this Job depends on.
+ @return <tt>true</tt> if the Job was added.]]>
+      </doc>
+    </method>
+    <method name="getJobClient" return="org.apache.hadoop.mapred.JobClient"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the job client of this job]]>
+      </doc>
+    </method>
+    <method name="getDependingJobs" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the depending jobs of this job]]>
+      </doc>
+    </method>
+    <method name="getMapredJobID" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the mapred ID of this job as assigned by the mapred framework.]]>
+      </doc>
+    </method>
+    <method name="setMapredJobID"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="mapredJobID" type="java.lang.String"/>
+      <doc>
+      <![CDATA[This is no-op method for backward compatibility. It's a behavior change
+ from 1.x, we can not change job ids from job.
+ 
+ @param mapredJobID
+          the mapred job ID for this job.]]>
+      </doc>
+    </method>
+    <field name="SUCCESS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="WAITING" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RUNNING" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="READY" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FAILED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEPENDENT_FAILED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.jobcontrol.Job -->
+  <!-- start class org.apache.hadoop.mapred.jobcontrol.JobControl -->
+  <class name="JobControl" extends="org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="JobControl" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a job control for a group of jobs.
+ @param groupName a name identifying this group]]>
+      </doc>
+    </constructor>
+    <method name="getWaitingJobs" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the jobs in the waiting state]]>
+      </doc>
+    </method>
+    <method name="getRunningJobs" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the jobs in the running state]]>
+      </doc>
+    </method>
+    <method name="getReadyJobs" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the jobs in the ready state]]>
+      </doc>
+    </method>
+    <method name="getSuccessfulJobs" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the jobs in the success state]]>
+      </doc>
+    </method>
+    <method name="getFailedJobs" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="addJobs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobs" type="java.util.Collection"/>
+      <doc>
+      <![CDATA[Add a collection of jobs
+ 
+ @param jobs]]>
+      </doc>
+    </method>
+    <method name="getState" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the thread state]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.jobcontrol.JobControl -->
+</package>
+<package name="org.apache.hadoop.mapred.join">
+  <!-- start class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
+  <class name="ArrayListBackedIterator" extends="org.apache.hadoop.mapreduce.lib.join.ArrayListBackedIterator"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.join.ResetableIterator"/>
+    <constructor name="ArrayListBackedIterator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ArrayListBackedIterator" type="java.util.ArrayList"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This class provides an implementation of ResetableIterator. The
+ implementation uses an {@link java.util.ArrayList} to store elements
+ added to it, replaying them as requested.
+ Prefer {@link StreamBackedIterator}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.ArrayListBackedIterator -->
+  <!-- start interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+  <interface name="ComposableInputFormat"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.InputFormat"/>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Refinement of InputFormat requiring implementors to provide
+ ComposableRecordReader instead of RecordReader.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.join.ComposableInputFormat -->
+  <!-- start interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+  <interface name="ComposableRecordReader"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.RecordReader"/>
+    <implements name="java.lang.Comparable"/>
+    <method name="id" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the position in the collector this class occupies.]]>
+      </doc>
+    </method>
+    <method name="key" return="K"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the key this RecordReader would supply on a call to next(K,V)]]>
+      </doc>
+    </method>
+    <method name="key"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Clone the key at the head of this RecordReader into the object provided.]]>
+      </doc>
+    </method>
+    <method name="hasNext" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns true if the stream is not empty, but provides no guarantee that
+ a call to next(K,V) will succeed.]]>
+      </doc>
+    </method>
+    <method name="skip"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+      </doc>
+    </method>
+    <method name="accept"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[While key-value pairs from this RecordReader match the given key, register
+ them with the JoinCollector provided.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Additional operations required of a RecordReader to participate in a join.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.join.ComposableRecordReader -->
+  <!-- start class org.apache.hadoop.mapred.join.CompositeInputFormat -->
+  <class name="CompositeInputFormat" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat"/>
+    <constructor name="CompositeInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setFormat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Interpret a given string as a composite expression.
+ {@code
+   func  ::= <ident>([<func>,]*<func>)
+   func  ::= tbl(<class>,"<path>")
+   class ::= @see java.lang.Class#forName(java.lang.String)
+   path  ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String)
+ }
+ Reads expression from the <tt>mapred.join.expr</tt> property and
+ user-supplied join types from <tt>mapred.join.define.&lt;ident&gt;</tt>
+  types. Paths supplied to <tt>tbl</tt> are given as input paths to the
+ InputFormat class listed.
+ @see #compose(java.lang.String, java.lang.Class, java.lang.String...)]]>
+      </doc>
+    </method>
+    <method name="addDefaults"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Adds the default set of identifiers to the parser.]]>
+      </doc>
+    </method>
+    <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="numSplits" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Build a CompositeInputSplit from the child InputFormats by assigning the
+ ith split from each child to the ith composite split.]]>
+      </doc>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct a CompositeRecordReader for the children of this InputFormat
+ as defined in the init expression.
+ The outermost join need only be composable, not necessarily a composite.
+ Mandating TupleWritable isn't strictly correct.]]>
+      </doc>
+    </method>
+    <method name="compose" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="inf" type="java.lang.Class"/>
+      <param name="path" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Convenience method for constructing composite formats.
+ Given InputFormat class (inf), path (p) return:
+ {@code tbl(<inf>, <p>) }]]>
+      </doc>
+    </method>
+    <method name="compose" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="op" type="java.lang.String"/>
+      <param name="inf" type="java.lang.Class"/>
+      <param name="path" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), Object class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+      </doc>
+    </method>
+    <method name="compose" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="op" type="java.lang.String"/>
+      <param name="inf" type="java.lang.Class"/>
+      <param name="path" type="org.apache.hadoop.fs.Path[]"/>
+      <doc>
+      <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), Object class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[An InputFormat capable of performing joins over a set of data sources sorted
+ and partitioned the same way.
+
+ A user may define new join types by setting the property
+ <tt>mapred.join.define.&lt;ident&gt;</tt> to a classname. In the expression
+ <tt>mapred.join.expr</tt>, the identifier will be assumed to be a
+ ComposableRecordReader.
+ <tt>mapred.join.keycomparator</tt> can be a classname used to compare keys
+ in the join.
+ @see #setFormat
+ @see JoinRecordReader
+ @see MultiFilterRecordReader]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.CompositeInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.join.CompositeInputSplit -->
+  <class name="CompositeInputSplit" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.InputSplit"/>
+    <constructor name="CompositeInputSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="CompositeInputSplit" type="int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="s" type="org.apache.hadoop.mapred.InputSplit"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add an InputSplit to this collection.
+ @throws IOException If capacity was not specified during construction
+                     or if capacity has been reached.]]>
+      </doc>
+    </method>
+    <method name="get" return="org.apache.hadoop.mapred.InputSplit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="int"/>
+      <doc>
+      <![CDATA[Get ith child InputSplit.]]>
+      </doc>
+    </method>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the aggregate length of all child InputSplits currently added.]]>
+      </doc>
+    </method>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the length of ith child InputSplit.]]>
+      </doc>
+    </method>
+    <method name="getLocations" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Collect a set of hosts from all child InputSplits.]]>
+      </doc>
+    </method>
+    <method name="getLocation" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[getLocations from ith InputSplit.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write splits in the following format.
+ {@code
+ <count><class1><class2>...<classn><split1><split2>...<splitn>
+ }]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}
+ @throws IOException If the child InputSplit cannot be read, typically
+                     for failing access checks.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This InputSplit contains a set of child InputSplits. Any InputSplit inserted
+ into this collection must have a public default constructor.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.CompositeInputSplit -->
+  <!-- start class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+  <class name="CompositeRecordReader" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="CompositeRecordReader" type="int, int, java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a RecordReader with <tt>capacity</tt> children to position
+ <tt>id</tt> in the parent reader.
+ The id of a root CompositeRecordReader is -1 by convention, but relying
+ on this is not recommended.]]>
+      </doc>
+    </constructor>
+    <method name="combine" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="srcs" type="java.lang.Object[]"/>
+      <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+    </method>
+    <method name="id" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the position in the collector this class occupies.]]>
+      </doc>
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getRecordReaderQueue" return="java.util.PriorityQueue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return sorted list of RecordReaders for this composite.]]>
+      </doc>
+    </method>
+    <method name="getComparator" return="org.apache.hadoop.io.WritableComparator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return comparator defining the ordering for RecordReaders in this
+ composite.]]>
+      </doc>
+    </method>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="rr" type="org.apache.hadoop.mapred.join.ComposableRecordReader"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add a RecordReader to this collection.
+ The id() of a RecordReader determines where in the Tuple its
+ entry will appear. Adding RecordReaders with the same id has
+ undefined behavior.]]>
+      </doc>
+    </method>
+    <method name="key" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the key for the current join or the value at the top of the
+ RecordReader heap.]]>
+      </doc>
+    </method>
+    <method name="key"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Clone the key at the top of this RR into the given object.]]>
+      </doc>
+    </method>
+    <method name="hasNext" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return true if it is possible that this could emit more values.]]>
+      </doc>
+    </method>
+    <method name="skip"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Pass skip key to child RRs.]]>
+      </doc>
+    </method>
+    <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain an iterator over the child RRs apropos of the value type
+ ultimately emitted from this join.]]>
+      </doc>
+    </method>
+    <method name="accept"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[If key provided matches that of this Composite, give JoinCollector
+ iterator over values it may emit.]]>
+      </doc>
+    </method>
+    <method name="fillJoinCollector"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="iterkey" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[For all child RRs offering the key provided, obtain an iterator
+ at that position in the JoinCollector.]]>
+      </doc>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader"/>
+      <doc>
+      <![CDATA[Implement Comparable contract (compare key of join or head of heap
+ with that of another).]]>
+      </doc>
+    </method>
+    <method name="createKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new key value common to all child RRs.
+ @throws ClassCastException if key classes differ.]]>
+      </doc>
+    </method>
+    <method name="createInternalValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a value to be used internally for joins.]]>
+      </doc>
+    </method>
+    <method name="getPos" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unsupported (returns zero in all cases).]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close all child RRs.]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Report progress as the minimum of all child RR progress.]]>
+      </doc>
+    </method>
+    <field name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="kids" type="org.apache.hadoop.mapred.join.ComposableRecordReader[]"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A RecordReader that can effect joins of RecordReaders sharing a common key
+ type and partitioning.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.CompositeRecordReader -->
+  <!-- start class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+  <class name="InnerJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="combine" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="srcs" type="java.lang.Object[]"/>
+      <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+      <doc>
+      <![CDATA[Return true iff the tuple is full (all data sources contain this key).]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Full inner join.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.InnerJoinRecordReader -->
+  <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader -->
+  <class name="JoinRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader"/>
+    <constructor name="JoinRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Emit the next set of key, value pairs as defined by the child
+ RecordReaders and operation associated with this composite RR.]]>
+      </doc>
+    </method>
+    <method name="createValue" return="org.apache.hadoop.mapred.join.TupleWritable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return an iterator wrapping the JoinCollector.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Base class for Composite joins returning Tuples of arbitrary Writables.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader -->
+  <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+  <class name="MultiFilterRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader"/>
+    <constructor name="MultiFilterRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="emit" return="V"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[For each tuple emitted, return a value (typically one of the values
+ in the tuple).
+ Modifying the Writables in the tuple is permitted and unlikely to affect
+ join behavior in most cases, but it is not recommended. It's safer to
+ clone first.]]>
+      </doc>
+    </method>
+    <method name="combine" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="srcs" type="java.lang.Object[]"/>
+      <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+      <doc>
+      <![CDATA[Default implementation offers {@link #emit} every Tuple from the
+ collector (the outer join of child RRs).]]>
+      </doc>
+    </method>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="createValue" return="V"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return an iterator returning a single value from the tuple.
+ @see MultiFilterDelegationIterator]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Base class for Composite join returning values derived from multiple
+ sources, but generally not tuples.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader -->
+  <!-- start class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+  <class name="OuterJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="combine" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="srcs" type="java.lang.Object[]"/>
+      <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+      <doc>
+      <![CDATA[Emit everything from the collector.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Full outer join.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.OuterJoinRecordReader -->
+  <!-- start class org.apache.hadoop.mapred.join.OverrideRecordReader -->
+  <class name="OverrideRecordReader" extends="org.apache.hadoop.mapred.join.MultiFilterRecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="emit" return="V"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/>
+      <doc>
+      <![CDATA[Emit the value with the highest position in the tuple.]]>
+      </doc>
+    </method>
+    <method name="fillJoinCollector"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="iterkey" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Instead of filling the JoinCollector with iterators from all
+ data sources, fill only the rightmost for this key.
+ This not only saves space by discarding the other sources, but
+ it also emits the number of key-value pairs in the preferred
+ RecordReader instead of repeating that stream n times, where
+ n is the cardinality of the cross product of the discarded
+ streams for the given key.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Prefer the &quot;rightmost&quot; data source for this key.
+ For example, <tt>override(S1,S2,S3)</tt> will prefer values
+ from S3 over S2, and values from S2 over S1 for all keys
+ emitted from all sources.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.OverrideRecordReader -->
+  <!-- start class org.apache.hadoop.mapred.join.Parser -->
+  <class name="Parser" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Parser"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Very simple shift-reduce parser for join expressions.
+
+ This should be sufficient for the user extension permitted now, but ought to
+ be replaced with a parser generator if more complex grammars are supported.
+ In particular, this &quot;shift-reduce&quot; parser has no states. Each set
+ of formals requires a different internal node type, which is responsible for
+ interpreting the list of tokens it receives. This is sufficient for the
+ current grammar, but it has several annoying properties that might inhibit
+ extension. In particular, parentheses are always function calls; an
+ algebraic or filter grammar would not only require a node type, but must
+ also work around the internals of this parser.
+
+ For most other cases, adding classes to the hierarchy- particularly by
+ extending JoinRecordReader and MultiFilterRecordReader- is fairly
+ straightforward. One need only override the relevant method(s) (usually only
+ {@link CompositeRecordReader#combine}) and include a property to map its
+ value to an identifier in the parser.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.Parser -->
+  <!-- start class org.apache.hadoop.mapred.join.Parser.Node -->
+  <class name="Parser.Node" extends="java.lang.Object"
+    abstract="true"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat"/>
+    <constructor name="Node" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="addIdentifier"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="ident" type="java.lang.String"/>
+      <param name="mcstrSig" type="java.lang.Class[]"/>
+      <param name="nodetype" type="java.lang.Class"/>
+      <param name="cl" type="java.lang.Class"/>
+      <exception name="NoSuchMethodException" type="java.lang.NoSuchMethodException"/>
+      <doc>
+      <![CDATA[For a given identifier, add a mapping to the nodetype for the parse
+ tree and to the ComposableRecordReader to be created, including the
+ formals required to invoke the constructor.
+ The nodetype and constructor signature should be filled in from the
+ child node.]]>
+      </doc>
+    </method>
+    <method name="setID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="id" type="int"/>
+    </method>
+    <method name="setKeyComparator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="cmpcl" type="java.lang.Class"/>
+    </method>
+    <field name="rrCstrMap" type="java.util.Map"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="id" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="ident" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="cmpcl" type="java.lang.Class"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.Parser.Node -->
+  <!-- start class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+  <class name="Parser.NodeToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.Parser.NodeToken -->
+  <!-- start class org.apache.hadoop.mapred.join.Parser.NumToken -->
+  <class name="Parser.NumToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NumToken" type="double"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNum" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.Parser.NumToken -->
+  <!-- start class org.apache.hadoop.mapred.join.Parser.StrToken -->
+  <class name="Parser.StrToken" extends="org.apache.hadoop.mapred.join.Parser.Token"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StrToken" type="org.apache.hadoop.mapred.join.Parser.TType, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getStr" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.Parser.StrToken -->
+  <!-- start class org.apache.hadoop.mapred.join.Parser.Token -->
+  <class name="Parser.Token" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getType" return="org.apache.hadoop.mapred.join.Parser.TType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getNum" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getStr" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Tagged-union type for tokens from the join expression.
+ @see Parser.TType]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.Parser.Token -->
+  <!-- start class org.apache.hadoop.mapred.join.Parser.TType -->
+  <class name="Parser.TType" extends="java.lang.Enum"
+    abstract="false"
+    static="true" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.mapred.join.Parser.TType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.mapred.join.Parser.TType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.Parser.TType -->
+  <!-- start interface org.apache.hadoop.mapred.join.ResetableIterator -->
+  <interface name="ResetableIterator"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.join.ResetableIterator"/>
+    <doc>
+    <![CDATA[This defines an interface to a stateful Iterator that can replay elements
+ added to it directly.
+ Note that this does not extend {@link java.util.Iterator}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.join.ResetableIterator -->
+  <!-- start class org.apache.hadoop.mapred.join.StreamBackedIterator -->
+  <class name="StreamBackedIterator" extends="org.apache.hadoop.mapreduce.lib.join.StreamBackedIterator"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.join.ResetableIterator"/>
+    <constructor name="StreamBackedIterator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This class provides an implementation of ResetableIterator. This
+ implementation uses a byte array to store elements added to it.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.StreamBackedIterator -->
+  <!-- start class org.apache.hadoop.mapred.join.TupleWritable -->
+  <class name="TupleWritable" extends="org.apache.hadoop.mapreduce.lib.join.TupleWritable"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TupleWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an empty tuple with no allocated storage for writables.]]>
+      </doc>
+    </constructor>
+    <constructor name="TupleWritable" type="org.apache.hadoop.io.Writable[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initialize tuple with storage; unknown whether any of them contain
+ &quot;written&quot; values.]]>
+      </doc>
+    </constructor>
+    <doc>
+    <![CDATA[Writable type storing multiple {@link org.apache.hadoop.io.Writable}s.
+
+ This is *not* a general-purpose tuple type. In almost all cases, users are
+ encouraged to implement their own serializable types, which can perform
+ better validation and provide more efficient encodings than this class is
+ capable of. TupleWritable relies on the join framework for type safety and
+ assumes its instances will rarely be persisted, assumptions not only
+ incompatible with, but contrary to the general case.
+
+ @see org.apache.hadoop.io.Writable]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.TupleWritable -->
+  <!-- start class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+  <class name="WrappedRecordReader" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader"/>
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <method name="id" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="key" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the key at the head of this RR.]]>
+      </doc>
+    </method>
+    <method name="key"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="qkey" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Clone the key at the head of this RR into the object supplied.]]>
+      </doc>
+    </method>
+    <method name="hasNext" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return true if the RR- including the k,v pair stored in this object-
+ is exhausted.]]>
+      </doc>
+    </method>
+    <method name="skip"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+      </doc>
+    </method>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read the next k,v pair into the head of this object; return true iff
+ the RR and this are exhausted.]]>
+      </doc>
+    </method>
+    <method name="accept"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/>
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add an iterator to the collector at the position occupied by this
+ RecordReader over the values in this stream paired with the key
+ provided (ie register a stream of values from this source matching K
+ with a collector).]]>
+      </doc>
+    </method>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="U"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write key-value pair at the head of this stream to the objects provided;
+ get next key-value pair from proxied RR.]]>
+      </doc>
+    </method>
+    <method name="createKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Request new key from proxied RR.]]>
+      </doc>
+    </method>
+    <method name="createValue" return="U"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Request new value from proxied RR.]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Request progress from proxied RR.]]>
+      </doc>
+    </method>
+    <method name="getPos" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Request position from proxied RR.]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Forward close request to proxied RR.]]>
+      </doc>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader"/>
+      <doc>
+      <![CDATA[Implement Comparable contract (compare key at head of proxied RR
+ with that of another).]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Return true iff compareTo(other) returns true.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Proxy class for a RecordReader participating in the join framework.
+ This class keeps track of the &quot;head&quot; key-value pair for the
+ provided RecordReader and keeps a store of values matching a key when
+ this source is participating in a join.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.join.WrappedRecordReader -->
+</package>
+<package name="org.apache.hadoop.mapred.lib">
+  <!-- start class org.apache.hadoop.mapred.lib.BinaryPartitioner -->
+  <class name="BinaryPartitioner" extends="org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Partitioner"/>
+    <constructor name="BinaryPartitioner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <doc>
+    <![CDATA[Partition {@link BinaryComparable} keys using a configurable part of 
+ the bytes array returned by {@link BinaryComparable#getBytes()}. 
+ 
+ @see org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.BinaryPartitioner -->
+  <!-- start class org.apache.hadoop.mapred.lib.ChainMapper -->
+  <class name="ChainMapper" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Mapper"/>
+    <constructor name="ChainMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor.]]>
+      </doc>
+    </constructor>
+    <method name="addMapper"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="klass" type="java.lang.Class"/>
+      <param name="inputKeyClass" type="java.lang.Class"/>
+      <param name="inputValueClass" type="java.lang.Class"/>
+      <param name="outputKeyClass" type="java.lang.Class"/>
+      <param name="outputValueClass" type="java.lang.Class"/>
+      <param name="byValue" type="boolean"/>
+      <param name="mapperConf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Adds a Mapper class to the chain job's JobConf.
+ <p>
+ It has to be specified how key and values are passed from one element of
+ the chain to the next, by value or by reference. If a Mapper leverages the
+ assumed semantics that the key and values are not modified by the collector
+ 'by value' must be used. If the Mapper does not expect this semantics, as
+ an optimization to avoid serialization and deserialization 'by reference'
+ can be used.
+ <p>
+ For the added Mapper the configuration given for it,
+ <code>mapperConf</code>, have precedence over the job's JobConf. This
+ precedence is in effect when the task is running.
+ <p>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainMapper, this is done by the addMapper for the last mapper in the chain
+ <p>
+
+ @param job              job's JobConf to add the Mapper class.
+ @param klass            the Mapper class to add.
+ @param inputKeyClass    mapper input key class.
+ @param inputValueClass  mapper input value class.
+ @param outputKeyClass   mapper output key class.
+ @param outputValueClass mapper output value class.
+ @param byValue          indicates if key/values should be passed by value
+ to the next Mapper in the chain, if any.
+ @param mapperConf       a JobConf with the configuration for the Mapper
+ class. It is recommended to use a JobConf without default values using the
+ <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Configures the ChainMapper and all the Mappers in the chain.
+ <p>
+ If this method is overridden <code>super.configure(...)</code> should be
+ invoked at the beginning of the overriding method.]]>
+      </doc>
+    </method>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Object"/>
+      <param name="value" type="java.lang.Object"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Chains the <code>map(...)</code> methods of the Mappers in the chain.]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Closes  the ChainMapper and all the Mappers in the chain.
+ <p>
+ If this method is overridden <code>super.close()</code> should be
+ invoked at the end of the overriding method.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The ChainMapper class allows to use multiple Mapper classes within a single
+ Map task.
+ <p>
+ The Mapper classes are invoked in a chained (or piped) fashion, the output of
+ the first becomes the input of the second, and so on until the last Mapper,
+ the output of the last Mapper will be written to the task's output.
+ <p>
+ The key functionality of this feature is that the Mappers in the chain do not
+ need to be aware that they are executed in a chain. This enables having
+ reusable specialized Mappers that can be combined to perform composite
+ operations within a single task.
+ <p>
+ Special care has to be taken when creating chains that the key/values output
+ by a Mapper are valid for the following Mapper in the chain. It is assumed
+ all Mappers and the Reducer in the chain use matching output and input key and
+ value classes as no conversion is done by the chaining code.
+ <p>
+ Using the ChainMapper and the ChainReducer classes it is possible to compose
+ Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
+ immediate benefit of this pattern is a dramatic reduction in disk IO.
+ <p>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainMapper, this is done by the addMapper for the last mapper in the chain.
+ <p>
+ ChainMapper usage pattern:
+ <p>
+ <pre>
+ ...
+ conf.setJobName("chain");
+ conf.setInputFormat(TextInputFormat.class);
+ conf.setOutputFormat(TextOutputFormat.class);
+
+ JobConf mapAConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
+   Text.class, Text.class, true, mapAConf);
+
+ JobConf mapBConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
+   LongWritable.class, Text.class, false, mapBConf);
+
+ JobConf reduceConf = new JobConf(false);
+ ...
+ ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
+   Text.class, Text.class, true, reduceConf);
+
+ ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
+   LongWritable.class, Text.class, false, null);
+
+ ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
+   LongWritable.class, LongWritable.class, true, null);
+
+ FileInputFormat.setInputPaths(conf, inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+ ...
+
+ JobClient jc = new JobClient(conf);
+ RunningJob job = jc.submitJob(conf);
+ ...
+ </pre>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.ChainMapper -->
+  <!-- start class org.apache.hadoop.mapred.lib.ChainReducer -->
+  <class name="ChainReducer" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Reducer"/>
+    <constructor name="ChainReducer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor.]]>
+      </doc>
+    </constructor>
+    <method name="setReducer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="klass" type="java.lang.Class"/>
+      <param name="inputKeyClass" type="java.lang.Class"/>
+      <param name="inputValueClass" type="java.lang.Class"/>
+      <param name="outputKeyClass" type="java.lang.Class"/>
+      <param name="outputValueClass" type="java.lang.Class"/>
+      <param name="byValue" type="boolean"/>
+      <param name="reducerConf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Sets the Reducer class to the chain job's JobConf.
+ <p>
+ It has to be specified how key and values are passed from one element of
+ the chain to the next, by value or by reference. If a Reducer leverages the
+ assumed semantics that the key and values are not modified by the collector
+ 'by value' must be used. If the Reducer does not expect this semantics, as
+ an optimization to avoid serialization and deserialization 'by reference'
+ can be used.
+ <p>
+ For the added Reducer the configuration given for it,
+ <code>reducerConf</code>, have precedence over the job's JobConf. This
+ precedence is in effect when the task is running.
+ <p>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainReducer, this is done by the setReducer or the addMapper for the last
+ element in the chain.
+
+ @param job              job's JobConf to add the Reducer class.
+ @param klass            the Reducer class to add.
+ @param inputKeyClass    reducer input key class.
+ @param inputValueClass  reducer input value class.
+ @param outputKeyClass   reducer output key class.
+ @param outputValueClass reducer output value class.
+ @param byValue          indicates if key/values should be passed by value
+ to the next Mapper in the chain, if any.
+ @param reducerConf      a JobConf with the configuration for the Reducer
+ class. It is recommended to use a JobConf without default values using the
+ <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.]]>
+      </doc>
+    </method>
+    <method name="addMapper"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="klass" type="java.lang.Class"/>
+      <param name="inputKeyClass" type="java.lang.Class"/>
+      <param name="inputValueClass" type="java.lang.Class"/>
+      <param name="outputKeyClass" type="java.lang.Class"/>
+      <param name="outputValueClass" type="java.lang.Class"/>
+      <param name="byValue" type="boolean"/>
+      <param name="mapperConf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Adds a Mapper class to the chain job's JobConf.
+ <p>
+ It has to be specified how key and values are passed from one element of
+ the chain to the next, by value or by reference. If a Mapper leverages the
+ assumed semantics that the key and values are not modified by the collector
+ 'by value' must be used. If the Mapper does not expect this semantics, as
+ an optimization to avoid serialization and deserialization 'by reference'
+ can be used.
+ <p>
+ For the added Mapper the configuration given for it,
+ <code>mapperConf</code>, have precedence over the job's JobConf. This
+ precedence is in effect when the task is running.
+ <p>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainMapper, this is done by the addMapper for the last mapper in the chain
+ .
+
+ @param job              chain job's JobConf to add the Mapper class.
+ @param klass            the Mapper class to add.
+ @param inputKeyClass    mapper input key class.
+ @param inputValueClass  mapper input value class.
+ @param outputKeyClass   mapper output key class.
+ @param outputValueClass mapper output value class.
+ @param byValue          indicates if key/values should be passed by value
+ to the next Mapper in the chain, if any.
+ @param mapperConf       a JobConf with the configuration for the Mapper
+ class. It is recommended to use a JobConf without default values using the
+ <code>JobConf(boolean loadDefaults)</code> constructor with FALSE.]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Configures the ChainReducer, the Reducer and all the Mappers in the chain.
+ <p>
+ If this method is overridden <code>super.configure(...)</code> should be
+ invoked at the beginning of the overriding method.]]>
+      </doc>
+    </method>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Object"/>
+      <param name="values" type="java.util.Iterator"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Chains the <code>reduce(...)</code> method of the Reducer with the
+ <code>map(...) </code> methods of the Mappers in the chain.]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Closes  the ChainReducer, the Reducer and all the Mappers in the chain.
+ <p>
+ If this method is overridden <code>super.close()</code> should be
+ invoked at the end of the overriding method.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The ChainReducer class allows to chain multiple Mapper classes after a
+ Reducer within the Reducer task.
+ <p>
+ For each record output by the Reducer, the Mapper classes are invoked in a
+ chained (or piped) fashion, the output of the first becomes the input of the
+ second, and so on until the last Mapper, the output of the last Mapper will
+ be written to the task's output.
+ <p>
+ The key functionality of this feature is that the Mappers in the chain do not
+ need to be aware that they are executed after the Reducer or in a chain.
+ This enables having reusable specialized Mappers that can be combined to
+ perform composite operations within a single task.
+ <p>
+ Special care has to be taken when creating chains that the key/values output
+ by a Mapper are valid for the following Mapper in the chain. It is assumed
+ all Mappers and the Reducer in the chain use matching output and input key and
+ value classes as no conversion is done by the chaining code.
+ <p>
+ Using the ChainMapper and the ChainReducer classes it is possible to compose
+ Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
+ immediate benefit of this pattern is a dramatic reduction in disk IO.
+ <p>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainReducer, this is done by the setReducer or the addMapper for the last
+ element in the chain.
+ <p>
+ ChainReducer usage pattern:
+ <p>
+ <pre>
+ ...
+ conf.setJobName("chain");
+ conf.setInputFormat(TextInputFormat.class);
+ conf.setOutputFormat(TextOutputFormat.class);
+
+ JobConf mapAConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, AMap.class, LongWritable.class, Text.class,
+   Text.class, Text.class, true, mapAConf);
+
+ JobConf mapBConf = new JobConf(false);
+ ...
+ ChainMapper.addMapper(conf, BMap.class, Text.class, Text.class,
+   LongWritable.class, Text.class, false, mapBConf);
+
+ JobConf reduceConf = new JobConf(false);
+ ...
+ ChainReducer.setReducer(conf, XReduce.class, LongWritable.class, Text.class,
+   Text.class, Text.class, true, reduceConf);
+
+ ChainReducer.addMapper(conf, CMap.class, Text.class, Text.class,
+   LongWritable.class, Text.class, false, null);
+
+ ChainReducer.addMapper(conf, DMap.class, LongWritable.class, Text.class,
+   LongWritable.class, LongWritable.class, true, null);
+
+ FileInputFormat.setInputPaths(conf, inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+ ...
+
+ JobClient jc = new JobClient(conf);
+ RunningJob job = jc.submitJob(conf);
+ ...
+ </pre>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.ChainReducer -->
+  <!-- start class org.apache.hadoop.mapred.lib.CombineFileInputFormat -->
+  <class name="CombineFileInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.InputFormat"/>
+    <constructor name="CombineFileInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[default constructor]]>
+      </doc>
+    </constructor>
+    <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="numSplits" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createPool"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="Use {@link #createPool(List)}.">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="filters" type="java.util.List"/>
+      <doc>
+      <![CDATA[Create a new pool and add the filters to it.
+ A split cannot have files from different pools.
+ @deprecated Use {@link #createPool(List)}.]]>
+      </doc>
+    </method>
+    <method name="createPool"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="Use {@link #createPool(PathFilter...)}.">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="filters" type="org.apache.hadoop.fs.PathFilter[]"/>
+      <doc>
+      <![CDATA[Create a new pool and add the filters to it. 
+ A pathname can satisfy any one of the specified filters.
+ A split cannot have files from different pools.
+ @deprecated Use {@link #createPool(PathFilter...)}.]]>
+      </doc>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This is not implemented yet.]]>
+      </doc>
+    </method>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression. 
+ 
+ @param job the job to list input paths for
+ @return array of FileStatus objects
+ @throws IOException if zero items.]]>
+      </doc>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <doc>
+    <![CDATA[An abstract {@link org.apache.hadoop.mapred.InputFormat} that returns {@link CombineFileSplit}'s
+ in {@link org.apache.hadoop.mapred.InputFormat#getSplits(JobConf, int)} method. 
+ Splits are constructed from the files under the input paths. 
+ A split cannot have files from different pools.
+ Each split returned may contain blocks from different files.
+ If a maxSplitSize is specified, then blocks on the same node are
+ combined to form a single split. Blocks that are left over are
+ then combined with other blocks in the same rack. 
+ If maxSplitSize is not specified, then blocks from the same rack
+ are combined in a single split; no attempt is made to create
+ node-local splits.
+ If the maxSplitSize is equal to the block size, then this class
+ is similar to the default splitting behaviour in Hadoop: each
+ block is a locally processed split.
+ Subclasses implement {@link org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit, JobConf, Reporter)}
+ to construct <code>RecordReader</code>'s for <code>CombineFileSplit</code>'s.
+ @see CombineFileSplit]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.CombineFileInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.lib.CombineFileRecordReader -->
+  <class name="CombineFileRecordReader" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.RecordReader"/>
+    <constructor name="CombineFileRecordReader" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.mapred.lib.CombineFileSplit, org.apache.hadoop.mapred.Reporter, java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[A generic RecordReader that can hand out different recordReaders
+ for each chunk in the CombineFileSplit.]]>
+      </doc>
+    </constructor>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createValue" return="V"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPos" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[return the amount of data processed]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[return progress based on the amount of data processed so far.]]>
+      </doc>
+    </method>
+    <method name="initNextRecordReader" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the record reader for the next chunk in this CombineFileSplit.]]>
+      </doc>
+    </method>
+    <field name="split" type="org.apache.hadoop.mapred.lib.CombineFileSplit"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="jc" type="org.apache.hadoop.mapred.JobConf"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="reporter" type="org.apache.hadoop.mapred.Reporter"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rrConstructor" type="java.lang.reflect.Constructor"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="idx" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="progress" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="curReader" type="org.apache.hadoop.mapred.RecordReader"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A generic RecordReader that can hand out different recordReaders
+ for each chunk in a {@link CombineFileSplit}.
+ A CombineFileSplit can combine data chunks from multiple files. 
+ This class allows using different RecordReaders for processing
+ these data chunks from different files.
+ @see CombineFileSplit]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.CombineFileRecordReader -->
+  <!-- start class org.apache.hadoop.mapred.lib.CombineFileRecordReaderWrapper -->
+  <class name="CombineFileRecordReaderWrapper" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.RecordReader"/>
+    <constructor name="CombineFileRecordReaderWrapper" type="org.apache.hadoop.mapred.FileInputFormat, org.apache.hadoop.mapred.lib.CombineFileSplit, org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.Reporter, java.lang.Integer"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createValue" return="V"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPos" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A wrapper class for a record reader that handles a single file split. It
+ delegates most of the methods to the wrapped instance. A concrete subclass
+ needs to provide a constructor that calls this parent constructor with the
+ appropriate input format. The subclass constructor must satisfy the specific
+ constructor signature that is required by
+ <code>CombineFileRecordReader</code>.
+
+ Subclassing is needed to get a concrete record reader wrapper because of the
+ constructor requirement.
+
+ @see CombineFileRecordReader
+ @see CombineFileInputFormat]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.CombineFileRecordReaderWrapper -->
+  <!-- start class org.apache.hadoop.mapred.lib.CombineFileSplit -->
+  <class name="CombineFileSplit" extends="org.apache.hadoop.mapreduce.lib.input.CombineFileSplit"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.InputSplit"/>
+    <constructor name="CombineFileSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="CombineFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[], long[], java.lang.String[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="CombineFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="CombineFileSplit" type="org.apache.hadoop.mapred.lib.CombineFileSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copy constructor]]>
+      </doc>
+    </constructor>
+    <method name="getJob" return="org.apache.hadoop.mapred.JobConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.CombineFileSplit -->
+  <!-- start class org.apache.hadoop.mapred.lib.CombineSequenceFileInputFormat -->
+  <class name="CombineSequenceFileInputFormat" extends="org.apache.hadoop.mapred.lib.CombineFileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CombineSequenceFileInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Input format that is a <code>CombineFileInputFormat</code>-equivalent for
+ <code>SequenceFileInputFormat</code>.
+
+ @see CombineFileInputFormat]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.CombineSequenceFileInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.lib.CombineTextInputFormat -->
+  <class name="CombineTextInputFormat" extends="org.apache.hadoop.mapred.lib.CombineFileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CombineTextInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Input format that is a <code>CombineFileInputFormat</code>-equivalent for
+ <code>TextInputFormat</code>.
+
+ @see CombineFileInputFormat]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.CombineTextInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
+  <class name="FieldSelectionMapReduce" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Mapper"/>
+    <implements name="org.apache.hadoop.mapred.Reducer"/>
+    <constructor name="FieldSelectionMapReduce"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="val" type="V"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The identity function. Input key/value pair is written directly to output.]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.io.Text"/>
+      <param name="values" type="java.util.Iterator"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class implements a mapper/reducer class that can be used to perform
+ field selections in a manner similar to unix cut. The input data is treated
+ as fields separated by a user specified separator (the default value is
+ "\t"). The user can specify a list of fields that form the map output keys,
+ and a list of fields that form the map output values. If the inputformat is
+ TextInputFormat, the mapper will ignore the key to the map function, and the
+ fields are from the value only. Otherwise, the fields are the union of those
+ from the key and those from the value.
+ 
+ The field separator is under attribute "mapreduce.fieldsel.data.field.separator"
+ 
+ The map output field list spec is under attribute 
+ "mapreduce.fieldsel.map.output.key.value.fields.spec".
+ The value is expected to be like "keyFieldsSpec:valueFieldsSpec"
+ key/valueFieldsSpec are comma (,) separated field spec: fieldSpec,fieldSpec,fieldSpec ...
+ Each field spec can be a simple number (e.g. 5) specifying a specific field, or a range
+ (like 2-5) to specify a range of fields, or an open range (like 3-) specifying all 
+ the fields starting from field 3. The open range field spec applies value fields only.
+ They have no effect on the key fields.
+ 
+ Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields 4,3,0 and 1 for keys,
+ and use fields 6,5,1,2,3,7 and above for values.
+ 
+ The reduce output field list spec is under attribute 
+ "mapreduce.fieldsel.reduce.output.key.value.fields.spec".
+ 
+ The reducer extracts output key/value pairs in a similar manner, except that
+ the key is never ignored.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce -->
+  <!-- start class org.apache.hadoop.mapred.lib.FilterOutputFormat -->
+  <class name="FilterOutputFormat" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.OutputFormat"/>
+    <constructor name="FilterOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FilterOutputFormat" type="org.apache.hadoop.mapred.OutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a FilterOutputFormat based on the supplied output format.
+ @param out the underlying OutputFormat]]>
+      </doc>
+    </constructor>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="baseOut" type="org.apache.hadoop.mapred.OutputFormat"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[FilterOutputFormat is a convenience class that wraps OutputFormat.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.FilterOutputFormat -->
+  <!-- start class org.apache.hadoop.mapred.lib.HashPartitioner -->
+  <class name="HashPartitioner" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Partitioner"/>
+    <constructor name="HashPartitioner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="getPartition" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K2"/>
+      <param name="value" type="V2"/>
+      <param name="numReduceTasks" type="int"/>
+      <doc>
+      <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Partition keys by their {@link Object#hashCode()}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.HashPartitioner -->
+  <!-- start class org.apache.hadoop.mapred.lib.IdentityMapper -->
+  <class name="IdentityMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Mapper"/>
+    <constructor name="IdentityMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="val" type="V"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The identity function.  Input key/value pair is written directly to
+ output.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Implements the identity function, mapping inputs directly to outputs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.IdentityMapper -->
+  <!-- start class org.apache.hadoop.mapred.lib.IdentityReducer -->
+  <class name="IdentityReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Reducer"/>
+    <constructor name="IdentityReducer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="values" type="java.util.Iterator"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all keys and values directly to output.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Performs no reduction, writing all input values directly to the output.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.IdentityReducer -->
+  <!-- start class org.apache.hadoop.mapred.lib.InputSampler -->
+  <class name="InputSampler" extends="org.apache.hadoop.mapreduce.lib.partition.InputSampler"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InputSampler" type="org.apache.hadoop.mapred.JobConf"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="writePartitionFile"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="sampler" type="org.apache.hadoop.mapred.lib.InputSampler.Sampler"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.InputSampler -->
+  <!-- start class org.apache.hadoop.mapred.lib.InverseMapper -->
+  <class name="InverseMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Mapper"/>
+    <constructor name="InverseMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The inverse function.  Input keys and values are swapped.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A {@link Mapper} that swaps keys and values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.InverseMapper -->
+  <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedComparator -->
+  <class name="KeyFieldBasedComparator" extends="org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedComparator"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <constructor name="KeyFieldBasedComparator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <doc>
+    <![CDATA[This comparator implementation provides a subset of the features provided
+ by the Unix/GNU Sort. In particular, the supported features are:
+ -n, (Sort numerically)
+ -r, (Reverse the result of comparison)
+ -k pos1[,pos2], where pos is of the form f[.c][opts], where f is the number
+  of the field to use, and c is the number of the first character from the
+  beginning of the field. Fields and character posns are numbered starting
+  with 1; a character position of zero in pos2 indicates the field's last
+  character. If '.c' is omitted from pos1, it defaults to 1 (the beginning
+  of the field); if omitted from pos2, it defaults to 0 (the end of the
+  field). opts are ordering options (any of 'nr' as described above). 
+ We assume that the fields in the key are separated by
+ {@link JobContext#MAP_OUTPUT_KEY_FIELD_SEPARATOR}]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedComparator -->
+  <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+  <class name="KeyFieldBasedPartitioner" extends="org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedPartitioner"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Partitioner"/>
+    <constructor name="KeyFieldBasedPartitioner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <doc>
+    <![CDATA[Defines a way to partition keys based on certain key fields (also see
+  {@link KeyFieldBasedComparator}.
+  The key specification supported is of the form -k pos1[,pos2], where,
+  pos is of the form f[.c][opts], where f is the number
+  of the key field to use, and c is the number of the first character from
+  the beginning of the field. Fields and character positions are numbered 
+  starting with 1; a character position of zero in pos2 indicates the
+  field's last character. If '.c' is omitted from pos1, it defaults to 1
+  (the beginning of the field); if omitted from pos2, it defaults to 0 
+  (the end of the field).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner -->
+  <!-- start class org.apache.hadoop.mapred.lib.LazyOutputFormat -->
+  <class name="LazyOutputFormat" extends="org.apache.hadoop.mapred.lib.FilterOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="LazyOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setOutputFormatClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the underlying output format for LazyOutputFormat.
+ @param job the {@link JobConf} to modify
+ @param theClass the underlying class]]>
+      </doc>
+    </method>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A Convenience class that creates output lazily.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.LazyOutputFormat -->
+  <!-- start class org.apache.hadoop.mapred.lib.LongSumReducer -->
+  <class name="LongSumReducer" extends="org.apache.hadoop.mapred.MapReduceBase"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Reducer"/>
+    <constructor name="LongSumReducer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="values" type="java.util.Iterator"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A {@link Reducer} that sums long values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.LongSumReducer -->
+  <!-- start class org.apache.hadoop.mapred.lib.MultipleInputs -->
+  <class name="MultipleInputs" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MultipleInputs"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="addInputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="inputFormatClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Add a {@link Path} with a custom {@link InputFormat} to the list of
+ inputs for the map-reduce job.
+ 
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for the job
+ @param inputFormatClass {@link InputFormat} class to use for this path]]>
+      </doc>
+    </method>
+    <method name="addInputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="inputFormatClass" type="java.lang.Class"/>
+      <param name="mapperClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Add a {@link Path} with a custom {@link InputFormat} and
+ {@link Mapper} to the list of inputs for the map-reduce job.
+ 
+ @param conf The configuration of the job
+ @param path {@link Path} to be added to the list of inputs for the job
+ @param inputFormatClass {@link InputFormat} class to use for this path
+ @param mapperClass {@link Mapper} class to use for this path]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class supports MapReduce jobs that have multiple input paths with
+ a different {@link InputFormat} and {@link Mapper} for each path]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.MultipleInputs -->
+  <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
+  <class name="MultipleOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MultipleOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a composite record writer that can write key/value data to different
+ output files
+ 
+ @param fs
+          the file system to use
+ @param job
+          the job conf for the job
+ @param name
+          the leaf file name for the output file (such as part-00000)
+ @param arg3
+          a progressable for reporting progress.
+ @return a composite record writer
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="generateLeafFileName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Generate the leaf name for the output file name. The default behavior does
+ not change the leaf file name (such as part-00000)
+ 
+ @param name
+          the leaf file name for the output file
+ @return the given leaf file name]]>
+      </doc>
+    </method>
+    <method name="generateFileNameForKeyValue" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Generate the file output file name based on the given key and the leaf file
+ name. The default behavior is that the file name does not depend on the
+ key.
+ 
+ @param key
+          the key of the output data
+ @param name
+          the leaf file name
+ @return generated file name]]>
+      </doc>
+    </method>
+    <method name="generateActualKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <doc>
+      <![CDATA[Generate the actual key from the given key/value. The default behavior is that
+ the actual key is equal to the given key
+ 
+ @param key
+          the key of the output data
+ @param value
+          the value of the output data
+ @return the actual key derived from the given key/value]]>
+      </doc>
+    </method>
+    <method name="generateActualValue" return="V"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <doc>
+      <![CDATA[Generate the actual value from the given key and value. The default behavior is that
+ the actual value is equal to the given value
+ 
+ @param key
+          the key of the output data
+ @param value
+          the value of the output data
+ @return the actual value derived from the given key/value]]>
+      </doc>
+    </method>
+    <method name="getInputFileBasedOutputFileName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Generate the outfile name based on a given name and the input file name. If
+ the {@link JobContext#MAP_INPUT_FILE} does not exists (i.e. this is not for a map only job),
+ the given name is returned unchanged. If the config value for
+ "num.of.trailing.legs.to.use" is not set, or set 0 or negative, the given
+ name is returned unchanged. Otherwise, return a file name consisting of the
+ N trailing legs of the input file name where N is the config value for
+ "num.of.trailing.legs.to.use".
+ 
+ @param job
+          the job config
+ @param name
+          the output file name
+ @return the outfile name based on a given name and the input file name.]]>
+      </doc>
+    </method>
+    <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@param fs
+          the file system to use
+ @param job
+          a job conf object
+ @param name
+          the name of the file over which a record writer object will be
+          constructed
+ @param arg3
+          a progressable object
+ @return A RecordWriter object over the given file
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This abstract class extends the FileOutputFormat, allowing to write the
+ output data to different output files. There are three basic use cases for
+ this class.
+ 
+ Case one: This class is used for a map reduce job with at least one reducer.
+ The reducer wants to write data to different files depending on the actual
+ keys. It is assumed that a key (or value) encodes the actual key (value)
+ and the desired location for the actual key (value).
+ 
+ Case two: This class is used for a map only job. The job wants to use an
+ output file name that is either a part of the input file name of the input
+ data, or some derivation of it.
+ 
+ Case three: This class is used for a map only job. The job wants to use an
+ output file name that depends on both the keys and the input file name.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputFormat -->
+  <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputs -->
+  <class name="MultipleOutputs" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MultipleOutputs" type="org.apache.hadoop.mapred.JobConf"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates and initializes multiple named outputs support, it should be
+ instantiated in the Mapper/Reducer configure method.
+
+ @param job the job configuration object]]>
+      </doc>
+    </constructor>
+    <method name="getNamedOutputsList" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Returns list of channel names.
+
+ @param conf job conf
+ @return List of channel Names]]>
+      </doc>
+    </method>
+    <method name="isMultiNamedOutput" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="namedOutput" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns if a named output is multiple.
+
+ @param conf        job conf
+ @param namedOutput named output
+ @return <code>true</code> if the name output is multi, <code>false</code>
+         if it is single. If the named output is not defined it returns
+         <code>false</code>]]>
+      </doc>
+    </method>
+    <method name="getNamedOutputFormatClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="namedOutput" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns the named output OutputFormat.
+
+ @param conf        job conf
+ @param namedOutput named output
+ @return namedOutput OutputFormat]]>
+      </doc>
+    </method>
+    <method name="getNamedOutputKeyClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="namedOutput" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns the key class for a named output.
+
+ @param conf        job conf
+ @param namedOutput named output
+ @return class for the named output key]]>
+      </doc>
+    </method>
+    <method name="getNamedOutputValueClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="namedOutput" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns the value class for a named output.
+
+ @param conf        job conf
+ @param namedOutput named output
+ @return class of named output value]]>
+      </doc>
+    </method>
+    <method name="addNamedOutput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="namedOutput" type="java.lang.String"/>
+      <param name="outputFormatClass" type="java.lang.Class"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valueClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Adds a named output for the job.
+
+ @param conf              job conf to add the named output
+ @param namedOutput       named output name, it has to be a word, letters
+                          and numbers only, cannot be the word 'part' as
+                          that is reserved for the
+                          default output.
+ @param outputFormatClass OutputFormat class.
+ @param keyClass          key class
+ @param valueClass        value class]]>
+      </doc>
+    </method>
+    <method name="addMultiNamedOutput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="namedOutput" type="java.lang.String"/>
+      <param name="outputFormatClass" type="java.lang.Class"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valueClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Adds a multi named output for the job.
+
+ @param conf              job conf to add the named output
+ @param namedOutput       named output name, it has to be a word, letters
+                          and numbers only, cannot be the word 'part' as
+                          that is reserved for the
+                          default output.
+ @param outputFormatClass OutputFormat class.
+ @param keyClass          key class
+ @param valueClass        value class]]>
+      </doc>
+    </method>
+    <method name="setCountersEnabled"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="enabled" type="boolean"/>
+      <doc>
+      <![CDATA[Enables or disables counters for the named outputs.
+ <p>
+ By default these counters are disabled.
+ <p>
+ MultipleOutputs supports counters; by default they are disabled.
+ The counters group is the {@link MultipleOutputs} class name.
+ </p>
+ The names of the counters are the same as the named outputs. For multi
+ named outputs the name of the counter is the concatenation of the named
+ output, and underscore '_' and the multiname.
+
+ @param conf    job conf in which to enable or disable the counters.
+ @param enabled indicates if the counters will be enabled or not.]]>
+      </doc>
+    </method>
+    <method name="getCountersEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Returns if the counters for the named outputs are enabled or not.
+ <p>
+ By default these counters are disabled.
+ <p>
+ MultipleOutputs supports counters; by default they are disabled.
+ The counters group is the {@link MultipleOutputs} class name.
+ </p>
+ The names of the counters are the same as the named outputs. For multi
+ named outputs the name of the counter is the concatenation of the named
+ output, and underscore '_' and the multiname.
+
+
+ @param conf    job conf to check whether the counters are enabled.
+ @return TRUE if the counters are enabled, FALSE if they are disabled.]]>
+      </doc>
+    </method>
+    <method name="getNamedOutputs" return="java.util.Iterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns iterator with the defined named outputs.
+
+ @return iterator with the defined named outputs]]>
+      </doc>
+    </method>
+    <method name="getCollector" return="org.apache.hadoop.mapred.OutputCollector"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="namedOutput" type="java.lang.String"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets the output collector for a named output.
+
+ @param namedOutput the named output name
+ @param reporter    the reporter
+ @return the output collector for the given named output
+ @throws IOException thrown if output collector could not be created]]>
+      </doc>
+    </method>
+    <method name="getCollector" return="org.apache.hadoop.mapred.OutputCollector"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="namedOutput" type="java.lang.String"/>
+      <param name="multiName" type="java.lang.String"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets the output collector for a multi named output.
+
+ @param namedOutput the named output name
+ @param multiName   the multi name part
+ @param reporter    the reporter
+ @return the output collector for the given named output
+ @throws IOException thrown if output collector could not be created]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Closes all the opened named outputs.
+ <p>
+ If overridden, subclasses must invoke <code>super.close()</code> at the
+ end of their <code>close()</code>
+
+ @throws java.io.IOException thrown if any of the MultipleOutput files
+                             could not be closed properly.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The MultipleOutputs class simplifies writing to additional outputs other
+ than the job default output via the <code>OutputCollector</code> passed to
+ the <code>map()</code> and <code>reduce()</code> methods of the
+ <code>Mapper</code> and <code>Reducer</code> implementations.
+ <p>
+ Each additional output, or named output, may be configured with its own
+ <code>OutputFormat</code>, with its own key class and with its own value
+ class.
+ <p>
+ A named output can be a single file or a multi file. The latter is referred to as
+ a multi named output.
+ <p>
+ A multi named output is an unbound set of files all sharing the same
+ <code>OutputFormat</code>, key class and value class configuration.
+ <p>
+ When named outputs are used within a <code>Mapper</code> implementation,
+ key/values written to a name output are not part of the reduce phase, only
+ key/values written to the job <code>OutputCollector</code> are part of the
+ reduce phase.
+ <p>
+ MultipleOutputs supports counters; by default they are disabled. The counters
+ group is the {@link MultipleOutputs} class name.
+ </p>
+ The names of the counters are the same as the named outputs. For multi
+ named outputs the name of the counter is the concatenation of the named
+ output, and underscore '_' and the multiname.
+ <p>
+ Job configuration usage pattern is:
+ <pre>
+
+ JobConf conf = new JobConf();
+
+ conf.setInputPath(inDir);
+ FileOutputFormat.setOutputPath(conf, outDir);
+
+ conf.setMapperClass(MOMap.class);
+ conf.setReducerClass(MOReduce.class);
+ ...
+
+ // Defines additional single text based output 'text' for the job
+ MultipleOutputs.addNamedOutput(conf, "text", TextOutputFormat.class,
+ LongWritable.class, Text.class);
+
+ // Defines additional multi sequencefile based output 'sequence' for the
+ // job
+ MultipleOutputs.addMultiNamedOutput(conf, "seq",
+   SequenceFileOutputFormat.class,
+   LongWritable.class, Text.class);
+ ...
+
+ JobClient jc = new JobClient();
+ RunningJob job = jc.submitJob(conf);
+
+ ...
+ </pre>
+ <p>
+ Job configuration usage pattern is:
+ <pre>
+
+ public class MOReduce implements
+   Reducer&lt;WritableComparable, Writable&gt; {
+ private MultipleOutputs mos;
+
+ public void configure(JobConf conf) {
+ ...
+ mos = new MultipleOutputs(conf);
+ }
+
+ public void reduce(WritableComparable key, Iterator&lt;Writable&gt; values,
+ OutputCollector output, Reporter reporter)
+ throws IOException {
+ ...
+ mos.getCollector("text", reporter).collect(key, new Text("Hello"));
+ mos.getCollector("seq", "A", reporter).collect(key, new Text("Bye"));
+ mos.getCollector("seq", "B", reporter).collect(key, new Text("Chau"));
+ ...
+ }
+
+ public void close() throws IOException {
+ mos.close();
+ ...
+ }
+
+ }
+ </pre>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputs -->
+  <!-- start class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+  <class name="MultipleSequenceFileOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MultipleSequenceFileOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[This class extends the MultipleOutputFormat, allowing to write the output data 
+ to different output files in sequence file output format.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat -->
+  <!-- start class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+  <class name="MultipleTextOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MultipleTextOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="arg3" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[This class extends the MultipleOutputFormat, allowing to write the output
+ data to different output files in Text output format.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat -->
+  <!-- start class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
+  <class name="MultithreadedMapRunner" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.MapRunnable"/>
+    <constructor name="MultithreadedMapRunner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="run"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="input" type="org.apache.hadoop.mapred.RecordReader"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Multithreaded implementation for {@link MapRunnable}.
+ <p>
+ It can be used instead of the default implementation,
+ of {@link org.apache.hadoop.mapred.MapRunner}, when the Map
+ operation is not CPU bound in order to improve throughput.
+ <p>
+ Map implementations using this MapRunnable must be thread-safe.
+ <p>
+ The Map-Reduce job has to be configured to use this MapRunnable class (using
+ the JobConf.setMapRunnerClass method) and
+ the number of threads the thread-pool can use with the
+ <code>mapred.map.multithreadedrunner.threads</code> property, its default
+ value is 10 threads.
+ <p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.MultithreadedMapRunner -->
+  <!-- start class org.apache.hadoop.mapred.lib.NLineInputFormat -->
+  <class name="NLineInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <constructor name="NLineInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="numSplits" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Logically splits the set of input files for the job, splits N lines
+ of the input as one split.
+ 
+ @see org.apache.hadoop.mapred.FileInputFormat#getSplits(JobConf, int)]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="createFileSplit" return="org.apache.hadoop.mapred.FileSplit"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fileName" type="org.apache.hadoop.fs.Path"/>
+      <param name="begin" type="long"/>
+      <param name="length" type="long"/>
+      <doc>
+      <![CDATA[NLineInputFormat uses LineRecordReader, which always reads
+ (and consumes) at least one character out of its upper split
+ boundary. So to make sure that each mapper gets N lines, we
+ move back the upper split limits of each split 
+ by one character here.
+ @param fileName  Path of file
+ @param begin  the position of the first byte in the file to process
+ @param length  number of bytes in InputSplit
+ @return  FileSplit]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[NLineInputFormat which splits N lines of input as one split.
+
+ In many "pleasantly" parallel applications, each process/mapper 
+ processes the same input file(s), but the computations are 
+ controlled by different parameters. (Referred to as "parameter sweeps".)
+ One way to achieve this is to specify a set of parameters 
+ (one set per line) as input in a control file 
+ (which is the input path to the map-reduce application,
+ whereas the input dataset is specified 
+ via a config variable in JobConf).
+ 
+ The NLineInputFormat can be used in such applications, that splits 
+ the input file such that by default, one line is fed as
+ a value to one map task, and key is the offset.
+ i.e. (k,v) is (LongWritable, Text).
+ The location hints will span the whole mapred cluster.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.NLineInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+  <class name="NullOutputFormat" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.OutputFormat"/>
+    <constructor name="NullOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <doc>
+    <![CDATA[Consume all outputs and put them in /dev/null.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.NullOutputFormat -->
+  <!-- start class org.apache.hadoop.mapred.lib.RegexMapper -->
+  <class name="RegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Mapper"/>
+    <constructor name="RegexMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="org.apache.hadoop.io.Text"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.RegexMapper -->
+  <!-- start class org.apache.hadoop.mapred.lib.TokenCountMapper -->
+  <class name="TokenCountMapper" extends="org.apache.hadoop.mapred.MapReduceBase"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Mapper"/>
+    <constructor name="TokenCountMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="org.apache.hadoop.io.Text"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A {@link Mapper} that maps text values into &lt;token,freq&gt; pairs.  Uses
+ {@link StringTokenizer} to break text into tokens.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.TokenCountMapper -->
+  <!-- start class org.apache.hadoop.mapred.lib.TotalOrderPartitioner -->
+  <class name="TotalOrderPartitioner" extends="org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Partitioner"/>
+    <constructor name="TotalOrderPartitioner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="setPartitionFile"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use 
+ {@link #setPartitionFile(Configuration, Path)}
+ instead">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Set the path to the SequenceFile storing the sorted partition keyset.
+ It must be the case that for <tt>R</tt> reduces, there are <tt>R-1</tt>
+ keys in the SequenceFile.
+ @deprecated Use 
+ {@link #setPartitionFile(Configuration, Path)}
+ instead]]>
+      </doc>
+    </method>
+    <method name="getPartitionFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use 
+ {@link #getPartitionFile(Configuration)}
+ instead">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the path to the SequenceFile storing the sorted partition keyset.
+ @see #setPartitionFile(JobConf,Path)
+ @deprecated Use 
+ {@link #getPartitionFile(Configuration)}
+ instead]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Partitioner effecting a total order by reading split points from
+ an externally generated source.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.TotalOrderPartitioner -->
+</package>
+<package name="org.apache.hadoop.mapred.lib.aggregate">
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
+  <class name="DoubleValueSum" extends="org.apache.hadoop.mapreduce.lib.aggregate.DoubleValueSum"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+    <constructor name="DoubleValueSum"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This class implements a value aggregator that sums up a sequence of double
+ values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+  <class name="LongValueMax" extends="org.apache.hadoop.mapreduce.lib.aggregate.LongValueMax"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+    <constructor name="LongValueMax"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This class implements a value aggregator that maintains the maximum of 
+ a sequence of long values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMax -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+  <class name="LongValueMin" extends="org.apache.hadoop.mapreduce.lib.aggregate.LongValueMin"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+    <constructor name="LongValueMin"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This class implements a value aggregator that maintains the minimum of 
+ a sequence of long values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMin -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+  <class name="LongValueSum" extends="org.apache.hadoop.mapreduce.lib.aggregate.LongValueSum"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+    <constructor name="LongValueSum"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This class implements a value aggregator that sums up 
+ a sequence of long values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueSum -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+  <class name="StringValueMax" extends="org.apache.hadoop.mapreduce.lib.aggregate.StringValueMax"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+    <constructor name="StringValueMax"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This class implements a value aggregator that maintains the biggest of 
+ a sequence of strings.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMax -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+  <class name="StringValueMin" extends="org.apache.hadoop.mapreduce.lib.aggregate.StringValueMin"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+    <constructor name="StringValueMin"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This class implements a value aggregator that maintains the smallest of 
+ a sequence of strings.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMin -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+  <class name="UniqValueCount" extends="org.apache.hadoop.mapreduce.lib.aggregate.UniqValueCount"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+    <constructor name="UniqValueCount"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[the default constructor]]>
+      </doc>
+    </constructor>
+    <constructor name="UniqValueCount" type="long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[constructor
+ @param maxNum the limit in the number of unique values to keep.]]>
+      </doc>
+    </constructor>
+    <doc>
+    <![CDATA[This class implements a value aggregator that dedupes a sequence of objects.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+  <class name="UserDefinedValueAggregatorDescriptor" extends="org.apache.hadoop.mapreduce.lib.aggregate.UserDefinedValueAggregatorDescriptor"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+    <constructor name="UserDefinedValueAggregatorDescriptor" type="java.lang.String, org.apache.hadoop.mapred.JobConf"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@param className the class name of the user defined descriptor class
+ @param job a configuration object used for descriptor configuration]]>
+      </doc>
+    </constructor>
+    <method name="createInstance" return="java.lang.Object"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="className" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create an instance of the given class
+ @param className the name of the class
+ @return a dynamically created instance of the given class]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Do nothing.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements a wrapper for a user defined value aggregator 
+ descriptor.
+ It serves two functions: One is to create an object of 
+ ValueAggregatorDescriptor from the name of a user defined class that may be 
+ dynamically loaded. The other is to delegate invocations of 
+ generateKeyValPairs function to the created object.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+  <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
+  <interface name="ValueAggregator"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator"/>
+    <doc>
+    <![CDATA[This interface defines the minimal protocol for value aggregators.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
+  <class name="ValueAggregatorBaseDescriptor" extends="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorBaseDescriptor"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/>
+    <constructor name="ValueAggregatorBaseDescriptor"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="generateEntry" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="java.lang.String"/>
+      <param name="id" type="java.lang.String"/>
+      <param name="val" type="org.apache.hadoop.io.Text"/>
+      <doc>
+      <![CDATA[@param type the aggregation type
+ @param id the aggregation id
+ @param val the val associated with the id to be aggregated
+ @return an Entry whose key is the aggregation id prefixed with 
+ the aggregation type.]]>
+      </doc>
+    </method>
+    <method name="generateValueAggregator" return="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="java.lang.String"/>
+      <doc>
+      <![CDATA[@param type the aggregation type
+ @return a value aggregator of the given type.]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[get the input file name.
+ 
+ @param job a job configuration object]]>
+      </doc>
+    </method>
+    <field name="UNIQ_VALUE_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LONG_VALUE_SUM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DOUBLE_VALUE_SUM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="VALUE_HISTOGRAM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LONG_VALUE_MAX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LONG_VALUE_MIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="STRING_VALUE_MAX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="STRING_VALUE_MIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class implements the common functionalities of 
+ the subclasses of ValueAggregatorDescriptor class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+  <class name="ValueAggregatorCombiner" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ValueAggregatorCombiner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Combiner does not need to configure.]]>
+      </doc>
+    </method>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.io.Text"/>
+      <param name="values" type="java.util.Iterator"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Combines values for a given key.  
+ @param key the key is expected to be a Text object, whose prefix indicates
+ the type of aggregation to aggregate the values. 
+ @param values the values to combine
+ @param output to collect combined values]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Do nothing.]]>
+      </doc>
+    </method>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="arg0" type="K1"/>
+      <param name="arg1" type="V1"/>
+      <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Do nothing. Should not be called.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements the generic combiner of Aggregate.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner -->
+  <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+  <interface name="ValueAggregatorDescriptor"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorDescriptor"/>
+    <method name="configure"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Configure the object
+ 
+ @param job
+          a JobConf object that may contain the information that can be used
+          to configure the object.]]>
+      </doc>
+    </method>
+    <field name="TYPE_SEPARATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ONE" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This interface defines the contract a value aggregator descriptor must
+ support. Such a descriptor can be configured with a JobConf object. Its main
+ function is to generate a list of aggregation-id/value pairs. An aggregation
+ id encodes an aggregation type which is used to guide the way to aggregate
+ the value in the reduce/combiner phase of an Aggregate based job. The mapper in
+ an Aggregate based map/reduce job may create one or more of
+ ValueAggregatorDescriptor objects at configuration time. For each input
+ key/value pair, the mapper will use those objects to create aggregation
+ id/value pairs.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
+  <class name="ValueAggregatorJob" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ValueAggregatorJob"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <param name="descriptors" type="java.lang.Class[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <param name="caller" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an Aggregate based map/reduce job.
+
+ @param args the arguments used for job creation. Generic hadoop
+ arguments are accepted.
+ @param caller the caller class.
+ @return a JobConf object ready for submission.
+
+ @throws IOException
+ @see GenericOptionsParser]]>
+      </doc>
+    </method>
+    <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an Aggregate based map/reduce job.
+ 
+ @param args the arguments used for job creation. Generic hadoop
+ arguments are accepted.
+ @return a JobConf object ready for submission.
+ 
+ @throws IOException
+ @see GenericOptionsParser]]>
+      </doc>
+    </method>
+    <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <param name="descriptors" type="java.lang.Class[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setAggregatorDescriptors"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="descriptors" type="java.lang.Class[]"/>
+    </method>
+    <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <param name="descriptors" type="java.lang.Class[]"/>
+      <param name="caller" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[create and run an Aggregate based map/reduce job.
+ 
+ @param args the arguments used for job creation
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the main class for creating a map/reduce job using Aggregate
+ framework. The Aggregate is a specialization of map/reduce framework,
+ specializing for performing various simple aggregations.
+ 
+ Generally speaking, in order to implement an application using Map/Reduce
+ model, the developer is to implement Map and Reduce functions (and possibly
+ combine function). However, a lot of applications related to counting and
+ statistics computing have very similar characteristics. Aggregate abstracts
+ out the general patterns of these functions and implementing those patterns.
+ In particular, the package provides generic mapper/reducer/combiner classes,
+ and a set of built-in value aggregators, and a generic utility class that
+ helps user create map/reduce jobs using the generic class. The built-in
+ aggregators include:
+ 
+ sum over numeric values count the number of distinct values compute the
+ histogram of values compute the minimum, maximum, median, average, standard
+ deviation of numeric values
+ 
+ The developer using Aggregate will need only to provide a plugin class
+ conforming to the following interface:
+ 
+ public interface ValueAggregatorDescriptor { public ArrayList&lt;Entry&gt;
+ generateKeyValPairs(Object key, Object value); public void
+ configure(JobConfjob); }
+ 
+ The package also provides a base class, ValueAggregatorBaseDescriptor,
+ implementing the above interface. The user can extend the base class and
+ implement generateKeyValPairs accordingly.
+ 
+ The primary work of generateKeyValPairs is to emit one or more key/value
+ pairs based on the input key/value pair. The key in an output key/value pair
+ encode two pieces of information: aggregation type and aggregation id. The
+ value will be aggregated onto the aggregation id according the aggregation
+ type.
+ 
+ This class offers a function to generate a map/reduce job using Aggregate
+ framework. The function takes the following parameters: input directory spec
+ input format (text or sequence file) output directory a file specifying the
+ user plugin class]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+  <class name="ValueAggregatorJobBase" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.Mapper"/>
+    <implements name="org.apache.hadoop.mapred.Reducer"/>
+    <constructor name="ValueAggregatorJobBase"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+    </method>
+    <method name="logSpec"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="aggregatorDescriptorList" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This abstract class implements some common functionalities of the
+ the generic mapper, reducer and combiner classes of Aggregate.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+  <class name="ValueAggregatorMapper" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ValueAggregatorMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K1"/>
+      <param name="value" type="V1"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[the map function. It iterates through the value aggregator descriptor 
+  list to generate aggregation id/value pairs and emit them.]]>
+      </doc>
+    </method>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="arg0" type="org.apache.hadoop.io.Text"/>
+      <param name="arg1" type="java.util.Iterator"/>
+      <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Do nothing. Should not be called.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements the generic mapper of Aggregate.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+  <class name="ValueAggregatorReducer" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ValueAggregatorReducer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.io.Text"/>
+      <param name="values" type="java.util.Iterator"/>
+      <param name="output" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@param key
+          the key is expected to be a Text object, whose prefix indicates
+          the type of aggregation to aggregate the values. In effect, data
+          driven computing is achieved. It is assumed that each aggregator's
+          getReport method emits appropriate output for the aggregator. This
+          may be further customiized.
+ @param values
+          the values to be aggregated]]>
+      </doc>
+    </method>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="arg0" type="K1"/>
+      <param name="arg1" type="V1"/>
+      <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector"/>
+      <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Do nothing. Should not be called]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements the generic reducer of Aggregate.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer -->
+  <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
+  <class name="ValueHistogram" extends="org.apache.hadoop.mapreduce.lib.aggregate.ValueHistogram"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/>
+    <constructor name="ValueHistogram"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This class implements a value aggregator that computes the 
+ histogram of a sequence of strings.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram -->
+</package>
+<package name="org.apache.hadoop.mapred.lib.db">
+  <!-- start class org.apache.hadoop.mapred.lib.db.DBConfiguration -->
+  <class name="DBConfiguration" extends="org.apache.hadoop.mapreduce.lib.db.DBConfiguration"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="configureDB"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="driverClass" type="java.lang.String"/>
+      <param name="dbUrl" type="java.lang.String"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="passwd" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets the DB access related fields in the JobConf.  
+ @param job the job
+ @param driverClass JDBC Driver class name
+ @param dbUrl JDBC DB access URL. 
+ @param userName DB access username 
+ @param passwd DB access passwd]]>
+      </doc>
+    </method>
+    <method name="configureDB"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="driverClass" type="java.lang.String"/>
+      <param name="dbUrl" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets the DB access related fields in the JobConf.  
+ @param job the job
+ @param driverClass JDBC Driver class name
+ @param dbUrl JDBC DB access URL.]]>
+      </doc>
+    </method>
+    <field name="DRIVER_CLASS_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The JDBC Driver class name]]>
+      </doc>
+    </field>
+    <field name="URL_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[JDBC Database access URL]]>
+      </doc>
+    </field>
+    <field name="USERNAME_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[User name to access the database]]>
+      </doc>
+    </field>
+    <field name="PASSWORD_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Password to access the database]]>
+      </doc>
+    </field>
+    <field name="INPUT_TABLE_NAME_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Input table name]]>
+      </doc>
+    </field>
+    <field name="INPUT_FIELD_NAMES_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Field names in the Input table]]>
+      </doc>
+    </field>
+    <field name="INPUT_CONDITIONS_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[WHERE clause in the input SELECT statement]]>
+      </doc>
+    </field>
+    <field name="INPUT_ORDER_BY_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[ORDER BY clause in the input SELECT statement]]>
+      </doc>
+    </field>
+    <field name="INPUT_QUERY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whole input query, exluding LIMIT...OFFSET]]>
+      </doc>
+    </field>
+    <field name="INPUT_COUNT_QUERY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Input query to get the count of records]]>
+      </doc>
+    </field>
+    <field name="INPUT_CLASS_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Class name implementing DBWritable which will hold input tuples]]>
+      </doc>
+    </field>
+    <field name="OUTPUT_TABLE_NAME_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Output table name]]>
+      </doc>
+    </field>
+    <field name="OUTPUT_FIELD_NAMES_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Field names in the Output table]]>
+      </doc>
+    </field>
+    <field name="OUTPUT_FIELD_COUNT_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of fields in the Output table]]>
+      </doc>
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.db.DBConfiguration -->
+  <!-- start class org.apache.hadoop.mapred.lib.db.DBInputFormat -->
+  <class name="DBInputFormat" extends="org.apache.hadoop.mapreduce.lib.db.DBInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.InputFormat"/>
+    <implements name="org.apache.hadoop.mapred.JobConfigurable"/>
+    <constructor name="DBInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapred.InputSplit"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="chunks" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="setInput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="inputClass" type="java.lang.Class"/>
+      <param name="tableName" type="java.lang.String"/>
+      <param name="conditions" type="java.lang.String"/>
+      <param name="orderBy" type="java.lang.String"/>
+      <param name="fieldNames" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Initializes the map-part of the job with the appropriate input settings.
+ 
+ @param job The job
+ @param inputClass the class object implementing DBWritable, which is the 
+ Java object holding tuple fields.
+ @param tableName The table to read data from
+ @param conditions The condition which to select data with, eg. '(updated &gt;
+ 20070101 AND length &gt; 0)'
+ @param orderBy the fieldNames in the orderBy clause.
+ @param fieldNames The field names in the table
+ @see #setInput(JobConf, Class, String, String)]]>
+      </doc>
+    </method>
+    <method name="setInput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="inputClass" type="java.lang.Class"/>
+      <param name="inputQuery" type="java.lang.String"/>
+      <param name="inputCountQuery" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Initializes the map-part of the job with the appropriate input settings.
+ 
+ @param job The job
+ @param inputClass the class object implementing DBWritable, which is the 
+ Java object holding tuple fields.
+ @param inputQuery the input query to select fields. Example : 
+ "SELECT f1, f2, f3 FROM Mytable ORDER BY f1"
+ @param inputCountQuery the input query that returns the number of records in
+ the table. 
+ Example : "SELECT COUNT(f1) FROM Mytable"
+ @see #setInput(JobConf, Class, String, String, String, String...)]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.db.DBInputFormat -->
+  <!-- start class org.apache.hadoop.mapred.lib.db.DBOutputFormat -->
+  <class name="DBOutputFormat" extends="org.apache.hadoop.mapreduce.lib.db.DBOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapred.OutputFormat"/>
+    <constructor name="DBOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filesystem" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="filesystem" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="progress" type="org.apache.hadoop.util.Progressable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="setOutput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="tableName" type="java.lang.String"/>
+      <param name="fieldNames" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Initializes the reduce-part of the job with the appropriate output settings
+ 
+ @param job The job
+ @param tableName The table to insert data into
+ @param fieldNames The field names in the table.]]>
+      </doc>
+    </method>
+    <method name="setOutput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="tableName" type="java.lang.String"/>
+      <param name="fieldCount" type="int"/>
+      <doc>
+      <![CDATA[Initializes the reduce-part of the job with the appropriate output settings
+ 
+ @param job The job
+ @param tableName The table to insert data into
+ @param fieldCount the number of fields in the table.]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.lib.db.DBOutputFormat -->
+  <!-- start interface org.apache.hadoop.mapred.lib.db.DBWritable -->
+  <interface name="DBWritable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.db.DBWritable"/>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapred.lib.db.DBWritable -->
+</package>
+<package name="org.apache.hadoop.mapred.pipes">
+  <!-- start class org.apache.hadoop.mapred.pipes.Submitter -->
+  <class name="Submitter" extends="org.apache.hadoop.conf.Configured"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.util.Tool"/>
+    <constructor name="Submitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="Submitter" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getExecutable" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Get the URI of the application's executable.
+ @param conf
+ @return the URI where the application's executable is located]]>
+      </doc>
+    </method>
+    <method name="setExecutable"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="executable" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the URI for the application's executable. Normally this is a hdfs: 
+ location.
+ @param conf
+ @param executable The URI of the application's executable.]]>
+      </doc>
+    </method>
+    <method name="setIsJavaRecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the job is using a Java RecordReader.
+ @param conf the configuration to modify
+ @param value the new value]]>
+      </doc>
+    </method>
+    <method name="getIsJavaRecordReader" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Check whether the job is using a Java RecordReader
+ @param conf the configuration to check
+ @return is it a Java RecordReader?]]>
+      </doc>
+    </method>
+    <method name="setIsJavaMapper"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the Mapper is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+      </doc>
+    </method>
+    <method name="getIsJavaMapper" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Check whether the job is using a Java Mapper.
+ @param conf the configuration to check
+ @return is it a Java Mapper?]]>
+      </doc>
+    </method>
+    <method name="setIsJavaReducer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the Reducer is written in Java.
+ @param conf the configuration to modify
+ @param value the new value]]>
+      </doc>
+    </method>
+    <method name="getIsJavaReducer" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Check whether the job is using a Java Reducer.
+ @param conf the configuration to check
+ @return is it a Java Reducer?]]>
+      </doc>
+    </method>
+    <method name="setIsJavaRecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the job will use a Java RecordWriter.
+ @param conf the configuration to modify
+ @param value the new value to set]]>
+      </doc>
+    </method>
+    <method name="getIsJavaRecordWriter" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Will the reduce use a Java RecordWriter?
+ @param conf the configuration to check
+ @return true, if the output of the job will be written by Java]]>
+      </doc>
+    </method>
+    <method name="getKeepCommandFile" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <doc>
+      <![CDATA[Does the user want to keep the command file for debugging? If this is
+ true, pipes will write a copy of the command data to a file in the
+ task directory named "downlink.data", which may be used to run the C++
+ program under the debugger. You probably also want to set 
+ JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from
+ being deleted.
+ To run using the data file, set the environment variable 
+ "mapreduce.pipes.commandfile" to point to the file.
+ @param conf the configuration to check
+ @return will the framework save the command file?]]>
+      </doc>
+    </method>
+    <method name="setKeepCommandFile"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <param name="keep" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether to keep the command file for debugging
+ @param conf the configuration to modify
+ @param keep the new value]]>
+      </doc>
+    </method>
+    <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link Submitter#runJob(JobConf)}">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications
+ to the job to run under pipes are made to the configuration.
+ @param conf the job to submit to the cluster (MODIFIED)
+ @throws IOException
+ @deprecated Use {@link Submitter#runJob(JobConf)}]]>
+      </doc>
+    </method>
+    <method name="runJob" return="org.apache.hadoop.mapred.RunningJob"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications
+ to the job to run under pipes are made to the configuration.
+ @param conf the job to submit to the cluster (MODIFIED)
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="jobSubmit" return="org.apache.hadoop.mapred.RunningJob"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.mapred.JobConf"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Submit a job to the Map-Reduce framework.
+ This returns a handle to the {@link RunningJob} which can be used to track
+ the running-job.
+ 
+ @param conf the job configuration.
+ @return a handle to the {@link RunningJob} which can be used to track the
+         running-job.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="run" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[Submit a pipes job based on the command line arguments.
+ @param args]]>
+      </doc>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="PRESERVE_COMMANDFILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="EXECUTABLE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="INTERPRETOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IS_JAVA_MAP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IS_JAVA_RR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IS_JAVA_RW" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IS_JAVA_REDUCE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="PARTITIONER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="INPUT_FORMAT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="PORT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[The main entry point and job submitter. It may either be used as a command
+ line-based or API-based method to launch Pipes jobs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapred.pipes.Submitter -->
+</package>
+<package name="org.apache.hadoop.mapreduce">
+  <!-- start class org.apache.hadoop.mapreduce.Cluster -->
+  <class name="Cluster" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Cluster" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <constructor name="Cluster" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close the <code>Cluster</code>.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getFileSystem" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the file system where job-specific files are stored
+ 
+ @return object of FileSystem
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getJob" return="org.apache.hadoop.mapreduce.Job"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobId" type="org.apache.hadoop.mapreduce.JobID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get job corresponding to jobid.
+ 
+ @param jobId
+ @return object of {@link Job}
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getQueues" return="org.apache.hadoop.mapreduce.QueueInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get all the queues in cluster.
+ 
+ @return array of {@link QueueInfo}
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getQueue" return="org.apache.hadoop.mapreduce.QueueInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get queue information for the specified name.
+ 
+ @param name queuename
+ @return object of {@link QueueInfo}
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getLogParams" return="org.apache.hadoop.mapreduce.v2.LogParams"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobID" type="org.apache.hadoop.mapreduce.JobID"/>
+      <param name="taskAttemptID" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get log parameters for the specified jobID or taskAttemptID
+ @param jobID the job id.
+ @param taskAttemptID the task attempt id. Optional.
+ @return the LogParams
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getClusterStatus" return="org.apache.hadoop.mapreduce.ClusterMetrics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get current cluster status.
+ 
+ @return object of {@link ClusterMetrics}
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getActiveTaskTrackers" return="org.apache.hadoop.mapreduce.TaskTrackerInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get all active trackers in the cluster.
+ 
+ @return array of {@link TaskTrackerInfo}
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getBlackListedTaskTrackers" return="org.apache.hadoop.mapreduce.TaskTrackerInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get blacklisted trackers.
+ 
+ @return array of {@link TaskTrackerInfo}
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getAllJobs" return="org.apache.hadoop.mapreduce.Job[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getAllJobStatuses()} instead.">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get all the jobs in cluster.
+ 
+ @return array of {@link Job}
+ @throws IOException
+ @throws InterruptedException
+ @deprecated Use {@link #getAllJobStatuses()} instead.]]>
+      </doc>
+    </method>
+    <method name="getAllJobStatuses" return="org.apache.hadoop.mapreduce.JobStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get job status for all jobs in the cluster.
+ @return job status for all jobs in cluster
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getSystemDir" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Grab the jobtracker system directory path where 
+ job-specific files will  be placed.
+ 
+ @return the system directory where job-specific files are to be placed.]]>
+      </doc>
+    </method>
+    <method name="getStagingAreaDir" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Grab the jobtracker's view of the staging directory path where 
+ job-specific files will  be placed.
+ 
+ @return the staging directory where job-specific files are to be placed.]]>
+      </doc>
+    </method>
+    <method name="getJobHistoryUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobId" type="org.apache.hadoop.mapreduce.JobID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the job history file path for a given job id. The job history file at 
+ this path may or may not be existing depending on the job completion state.
+ The file is present only for the completed jobs.
+ @param jobId the JobID of the job submitted by the current user.
+ @return the file path of the job history file
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getQueueAclsForCurrentUser" return="org.apache.hadoop.mapreduce.QueueAclsInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Gets the Queue ACLs for current user
+ @return array of QueueAclsInfo object for current user.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRootQueues" return="org.apache.hadoop.mapreduce.QueueInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Gets the root level queues.
+ @return array of JobQueueInfo object.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getChildQueues" return="org.apache.hadoop.mapreduce.QueueInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Returns immediate children of queueName.
+ @param queueName
+ @return array of JobQueueInfo which are children of queueName
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getJobTrackerStatus" return="org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the JobTracker's status.
+ 
+ @return {@link JobTrackerStatus} of the JobTracker
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getTaskTrackerExpiryInterval" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the tasktracker expiry interval for the cluster
+ @return the expiry interval in msec]]>
+      </doc>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="org.apache.hadoop.io.Text"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get a delegation token for the user from the JobTracker.
+ @param renewer the user who can renew the token
+ @return the new token
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="renewDelegationToken" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link Token#renew} instead">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Renew a delegation token
+ @param token the token to renew
+ @return the new expiration time
+ @throws InvalidToken
+ @throws IOException
+ @deprecated Use {@link Token#renew} instead]]>
+      </doc>
+    </method>
+    <method name="cancelDelegationToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link Token#cancel} instead">
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Cancel a delegation token from the JobTracker
+ @param token the token to cancel
+ @throws IOException
+ @deprecated Use {@link Token#cancel} instead]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Provides a way to access information about the map/reduce cluster.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.Cluster -->
+  <!-- start class org.apache.hadoop.mapreduce.ClusterMetrics -->
+  <class name="ClusterMetrics" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="ClusterMetrics"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ClusterMetrics" type="int, int, int, int, int, int, int, int, int, int, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ClusterMetrics" type="int, int, int, int, int, int, int, int, int, int, int, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRunningMaps" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of running map tasks in the cluster.
+ 
+ @return running maps]]>
+      </doc>
+    </method>
+    <method name="getRunningReduces" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of running reduce tasks in the cluster.
+ 
+ @return running reduces]]>
+      </doc>
+    </method>
+    <method name="getOccupiedMapSlots" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get number of occupied map slots in the cluster.
+ 
+ @return occupied map slot count]]>
+      </doc>
+    </method>
+    <method name="getOccupiedReduceSlots" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of occupied reduce slots in the cluster.
+ 
+ @return occupied reduce slot count]]>
+      </doc>
+    </method>
+    <method name="getReservedMapSlots" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get number of reserved map slots in the cluster.
+ 
+ @return reserved map slot count]]>
+      </doc>
+    </method>
+    <method name="getReservedReduceSlots" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of reserved reduce slots in the cluster.
+ 
+ @return reserved reduce slot count]]>
+      </doc>
+    </method>
+    <method name="getMapSlotCapacity" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the total number of map slots in the cluster.
+ 
+ @return map slot capacity]]>
+      </doc>
+    </method>
+    <method name="getReduceSlotCapacity" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the total number of reduce slots in the cluster.
+ 
+ @return reduce slot capacity]]>
+      </doc>
+    </method>
+    <method name="getTotalJobSubmissions" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the total number of job submissions in the cluster.
+ 
+ @return total number of job submissions]]>
+      </doc>
+    </method>
+    <method name="getTaskTrackerCount" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of active trackers in the cluster.
+ 
+ @return active tracker count.]]>
+      </doc>
+    </method>
+    <method name="getBlackListedTaskTrackerCount" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of blacklisted trackers in the cluster.
+ 
+ @return blacklisted tracker count]]>
+      </doc>
+    </method>
+    <method name="getGrayListedTaskTrackerCount" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of graylisted trackers in the cluster.
+ 
+ @return graylisted tracker count]]>
+      </doc>
+    </method>
+    <method name="getDecommissionedTaskTrackerCount" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of decommissioned trackers in the cluster.
+ 
+ @return decommissioned tracker count]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Status information on the current state of the Map-Reduce cluster.
+ 
+ <p><code>ClusterMetrics</code> provides clients with information such as:
+ <ol>
+   <li>
+   Size of the cluster.  
+   </li>
+   <li>
+   Number of blacklisted and decommissioned trackers.  
+   </li>
+   <li>
+   Slot capacity of the cluster. 
+   </li>
+   <li>
+   The number of currently occupied/reserved map and reduce slots.
+   </li>
+   <li>
+   The number of currently running map and reduce tasks.
+   </li>
+   <li>
+   The number of job submissions.
+   </li>
+ </ol>
+ 
+ <p>Clients can query for the latest <code>ClusterMetrics</code>, via 
+ {@link Cluster#getClusterStatus()}.</p>
+ 
+ @see Cluster]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.ClusterMetrics -->
+  <!-- start interface org.apache.hadoop.mapreduce.Counter -->
+  <interface name="Counter"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <method name="setDisplayName"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="(and no-op by default)">
+      <param name="displayName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the display name of the counter
+ @param displayName of the counter
+ @deprecated (and no-op by default)]]>
+      </doc>
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the name of the counter]]>
+      </doc>
+    </method>
+    <method name="getDisplayName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the display name of the counter.
+ @return the user facing name of the counter]]>
+      </doc>
+    </method>
+    <method name="getValue" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[What is the current value of this counter?
+ @return the current value]]>
+      </doc>
+    </method>
+    <method name="setValue"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Set this counter by the given value
+ @param value the value to set]]>
+      </doc>
+    </method>
+    <method name="increment"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="incr" type="long"/>
+      <doc>
+      <![CDATA[Increment this counter by the given value
+ @param incr the value to increase this counter by]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A named counter that tracks the progress of a map/reduce job.
+
+ <p><code>Counters</code> represent global counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> is named by
+ an {@link Enum} and has a long for the value.</p>
+
+ <p><code>Counters</code> are bunched into Groups, each comprising of
+ counters from a particular <code>Enum</code> class.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.Counter -->
+  <!-- start interface org.apache.hadoop.mapreduce.CounterGroup -->
+  <interface name="CounterGroup"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.counters.CounterGroupBase"/>
+    <doc>
+    <![CDATA[A group of {@link Counter}s that logically belong together. Typically,
+ it is an {@link Enum} subclass and the counters are the values.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.CounterGroup -->
+  <!-- start class org.apache.hadoop.mapreduce.Counters -->
+  <class name="Counters" extends="org.apache.hadoop.mapreduce.counters.AbstractCounters"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Counters"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor]]>
+      </doc>
+    </constructor>
+    <constructor name="Counters" type="org.apache.hadoop.mapreduce.counters.AbstractCounters"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct the Counters object from the another counters object
+ @param <C> the type of counter
+ @param <G> the type of counter group
+ @param counters the old counters object]]>
+      </doc>
+    </constructor>
+    <doc>
+    <![CDATA[<p><code>Counters</code> holds per job/task counters, defined either by the
+ Map-Reduce framework or applications. Each <code>Counter</code> can be of
+ any {@link Enum} type.</p>
+
+ <p><code>Counters</code> are bunched into {@link CounterGroup}s, each
+ comprising of counters from a particular <code>Enum</code> class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.Counters -->
+  <!-- start class org.apache.hadoop.mapreduce.ID -->
+  <class name="ID" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.WritableComparable"/>
+    <constructor name="ID" type="int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[constructs an ID object from the given int]]>
+      </doc>
+    </constructor>
+    <constructor name="ID"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[returns the int which represents the identifier]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="that" type="org.apache.hadoop.mapreduce.ID"/>
+      <doc>
+      <![CDATA[Compare IDs by associated numbers]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="SEPARATOR" type="char"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="id" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A general identifier, which internally stores the id
+ as an integer. This is the super class of {@link JobID}, 
+ {@link TaskID} and {@link TaskAttemptID}.
+ 
+ @see JobID
+ @see TaskID
+ @see TaskAttemptID]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.ID -->
+  <!-- start class org.apache.hadoop.mapreduce.InputFormat -->
+  <class name="InputFormat" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getSplits" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Logically split the set of input files for the job.  
+ 
+ <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper}
+ for processing.</p>
+
+ <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the
+ input files are not physically split into chunks. For e.g. a split could
+ be <i>&lt;input-file-path, start, offset&gt;</i> tuple. The InputFormat
+ also creates the {@link RecordReader} to read the {@link InputSplit}.
+ 
+ @param context job configuration.
+ @return an array of {@link InputSplit}s for the job.]]>
+      </doc>
+    </method>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Create a record reader for a given split. The framework will call
+ {@link RecordReader#initialize(InputSplit, TaskAttemptContext)} before
+ the split is used.
+ @param split the split to be read
+ @param context the information about the task
+ @return a new record reader
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<code>InputFormat</code> describes the input-specification for a 
+ Map-Reduce job. 
+ 
+ <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the
+ job to:</p>
+ <ol>
+   <li>
+   Validate the input-specification of the job.
+   </li>
+   <li>
+   Split-up the input file(s) into logical {@link InputSplit}s, each of 
+   which is then assigned to an individual {@link Mapper}.
+   </li>
+   <li>
+   Provide the {@link RecordReader} implementation to be used to glean
+   input records from the logical <code>InputSplit</code> for processing by 
+   the {@link Mapper}.
+   </li>
+ </ol>
+ 
+ <p>The default behavior of file-based {@link InputFormat}s, typically 
+ sub-classes of {@link FileInputFormat}, is to split the 
+ input into <i>logical</i> {@link InputSplit}s based on the total size, in 
+ bytes, of the input files. However, the {@link FileSystem} blocksize of  
+ the input files is treated as an upper bound for input splits. A lower bound 
+ on the split size can be set via 
+ <a href="{@docRoot}/../hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml#mapreduce.input.fileinputformat.split.minsize">
+ mapreduce.input.fileinputformat.split.minsize</a>.</p>
+ 
+ <p>Clearly, logical splits based on input-size is insufficient for many 
+ applications since record boundaries are to be respected. In such cases, the
+ application has to also implement a {@link RecordReader} on whom lies the
+ responsibility to respect record-boundaries and present a record-oriented
+ view of the logical <code>InputSplit</code> to the individual task.
+
+ @see InputSplit
+ @see RecordReader
+ @see FileInputFormat]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.InputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.InputSplit -->
+  <class name="InputSplit" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InputSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getLength" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the size of the split, so that the input splits can be sorted by size.
+ @return the number of bytes in the split
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getLocations" return="java.lang.String[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the list of nodes by name where the data for the split would be local.
+ The locations do not need to be serialized.
+ 
+ @return a new array of the nodes.
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getLocationInfo" return="org.apache.hadoop.mapred.SplitLocationInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets info about which nodes the input split is stored on and how it is
+ stored at each location.
+ 
+ @return list of <code>SplitLocationInfo</code>s describing how the split
+    data is stored at each location. A null value indicates that all the
+    locations have the data stored on disk.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<code>InputSplit</code> represents the data to be processed by an 
+ individual {@link Mapper}. 
+
+ <p>Typically, it presents a byte-oriented view on the input and is the 
+ responsibility of {@link RecordReader} of the job to process this and present
+ a record-oriented view.
+ 
+ @see InputFormat
+ @see RecordReader]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.InputSplit -->
+  <!-- start class org.apache.hadoop.mapreduce.Job -->
+  <class name="Job" extends="org.apache.hadoop.mapreduce.task.JobContextImpl"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.JobContext"/>
+    <implements name="java.lang.AutoCloseable"/>
+    <constructor name="Job"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getInstance()}">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated Use {@link #getInstance()}]]>
+      </doc>
+    </constructor>
+    <constructor name="Job" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getInstance(Configuration)}">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated Use {@link #getInstance(Configuration)}]]>
+      </doc>
+    </constructor>
+    <constructor name="Job" type="org.apache.hadoop.conf.Configuration, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getInstance(Configuration, String)}">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated Use {@link #getInstance(Configuration, String)}]]>
+      </doc>
+    </constructor>
+    <method name="getInstance" return="org.apache.hadoop.mapreduce.Job"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Creates a new {@link Job} with no particular {@link Cluster} .
+ A Cluster will be created with a generic {@link Configuration}.
+ 
+ @return the {@link Job} , with no connection to a cluster yet.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getInstance" return="org.apache.hadoop.mapreduce.Job"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Creates a new {@link Job} with no particular {@link Cluster} and a 
+ given {@link Configuration}.
+ 
+ The <code>Job</code> makes a copy of the <code>Configuration</code> so 
+ that any necessary internal modifications do not reflect on the incoming 
+ parameter.
+ 
+ A Cluster will be created from the conf parameter only when it's needed.
+ 
+ @param conf the configuration
+ @return the {@link Job} , with no connection to a cluster yet.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getInstance" return="org.apache.hadoop.mapreduce.Job"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="jobName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Creates a new {@link Job} with no particular {@link Cluster} and a given jobName.
+ A Cluster will be created from the conf parameter only when it's needed.
+
+ The <code>Job</code> makes a copy of the <code>Configuration</code> so 
+ that any necessary internal modifications do not reflect on the incoming 
+ parameter.
+ 
+ @param conf the configuration
+ @return the {@link Job} , with no connection to a cluster yet.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getInstance" return="org.apache.hadoop.mapreduce.Job"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="status" type="org.apache.hadoop.mapreduce.JobStatus"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Creates a new {@link Job} with no particular {@link Cluster} and given
+ {@link Configuration} and {@link JobStatus}.
+ A Cluster will be created from the conf parameter only when it's needed.
+ 
+ The <code>Job</code> makes a copy of the <code>Configuration</code> so 
+ that any necessary internal modifications do not reflect on the incoming 
+ parameter.
+ 
+ @param status job status
+ @param conf job configuration
+ @return the {@link Job} , with no connection to a cluster yet.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getInstance" return="org.apache.hadoop.mapreduce.Job"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #getInstance()}">
+      <param name="ignored" type="org.apache.hadoop.mapreduce.Cluster"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Creates a new {@link Job} with no particular {@link Cluster}.
+ A Cluster will be created from the conf parameter only when it's needed.
+
+ The <code>Job</code> makes a copy of the <code>Configuration</code> so 
+ that any necessary internal modifications do not reflect on the incoming 
+ parameter.
+ 
+ @param ignored
+ @return the {@link Job} , with no connection to a cluster yet.
+ @throws IOException
+ @deprecated Use {@link #getInstance()}]]>
+      </doc>
+    </method>
+    <method name="getInstance" return="org.apache.hadoop.mapreduce.Job"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #getInstance(Configuration)}">
+      <param name="ignored" type="org.apache.hadoop.mapreduce.Cluster"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Creates a new {@link Job} with no particular {@link Cluster} and given
+ {@link Configuration}.
+ A Cluster will be created from the conf parameter only when it's needed.
+ 
+ The <code>Job</code> makes a copy of the <code>Configuration</code> so 
+ that any necessary internal modifications do not reflect on the incoming 
+ parameter.
+ 
+ @param ignored
+ @param conf job configuration
+ @return the {@link Job} , with no connection to a cluster yet.
+ @throws IOException
+ @deprecated Use {@link #getInstance(Configuration)}]]>
+      </doc>
+    </method>
+    <method name="getStatus" return="org.apache.hadoop.mapreduce.JobStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getJobState" return="org.apache.hadoop.mapreduce.JobStatus.State"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Returns the current state of the Job.
+ 
+ @return JobStatus#State
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getTrackingURL" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the URL where some job progress information will be displayed.
+ 
+ @return the URL where some job progress information will be displayed.]]>
+      </doc>
+    </method>
+    <method name="getJobFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the path of the submitted job configuration.
+ 
+ @return the path of the submitted job configuration.]]>
+      </doc>
+    </method>
+    <method name="getStartTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get start time of the job.
+ 
+ @return the start time of the job]]>
+      </doc>
+    </method>
+    <method name="getFinishTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get finish time of the job.
+ 
+ @return the finish time of the job]]>
+      </doc>
+    </method>
+    <method name="getSchedulingInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get scheduling info of the job.
+ 
+ @return the scheduling info of the job]]>
+      </doc>
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.mapreduce.JobPriority"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get scheduling info of the job.
+ 
+ @return the priority info of the job]]>
+      </doc>
+    </method>
+    <method name="getJobName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The user-specified job name.]]>
+      </doc>
+    </method>
+    <method name="getHistoryUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="isRetired" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Dump stats to screen.]]>
+      </doc>
+    </method>
+    <method name="getTaskReports" return="org.apache.hadoop.mapreduce.TaskReport[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="org.apache.hadoop.mapreduce.TaskType"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the information of the current state of the tasks of a job.
+ 
+ @param type Type of the task
+ @return the list of all of the map tips.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="mapProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the <i>progress</i> of the job's map-tasks, as a float between 0.0 
+ and 1.0.  When all map tasks have completed, the function returns 1.0.
+ 
+ @return the progress of the job's map-tasks.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="reduceProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0 
+ and 1.0.  When all reduce tasks have completed, the function returns 1.0.
+ 
+ @return the progress of the job's reduce-tasks.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="cleanupProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the <i>progress</i> of the job's cleanup-tasks, as a float between 0.0 
+ and 1.0.  When all cleanup tasks have completed, the function returns 1.0.
+ 
+ @return the progress of the job's cleanup-tasks.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setupProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the <i>progress</i> of the job's setup-tasks, as a float between 0.0 
+ and 1.0.  When all setup tasks have completed, the function returns 1.0.
+ 
+ @return the progress of the job's setup-tasks.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="isComplete" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Check if the job is finished or not. 
+ This is a non-blocking call.
+ 
+ @return <code>true</code> if the job is complete, else <code>false</code>.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="isSuccessful" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Check if the job completed successfully. 
+ 
+ @return <code>true</code> if the job succeeded, else <code>false</code>.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="killJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Kill the running job.  Blocks until all job tasks have been
+ killed as well.  If the job is no longer running, it simply returns.
+ 
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setPriority"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobPriority" type="org.apache.hadoop.mapreduce.JobPriority"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Set the priority of a running job.
+ @param jobPriority the new priority for the job.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setPriorityAsInteger"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobPriority" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Set the priority of a running job.
+
+ @param jobPriority
+          the new priority for the job.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapreduce.TaskCompletionEvent[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="startFrom" type="int"/>
+      <param name="numEvents" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get events indicating completion (success/failure) of component tasks.
+  
+ @param startFrom index to start fetching events from
+ @param numEvents number of events to fetch
+ @return an array of {@link TaskCompletionEvent}s
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="startFrom" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get events indicating completion (success/failure) of component tasks.
+  
+ @param startFrom index to start fetching events from
+ @return an array of {@link org.apache.hadoop.mapred.TaskCompletionEvent}s
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="killTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskId" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Kill indicated task attempt.
+ 
+ @param taskId the id of the task to be terminated.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="failTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskId" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Fail indicated task attempt.
+ 
+ @param taskId the id of the task to be terminated.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getCounters" return="org.apache.hadoop.mapreduce.Counters"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets the counters for this job. May return null if the job has been
+ retired and the job is no longer in the completed job store.
+ 
+ @return the counters for this job.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getTaskDiagnostics" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskid" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Gets the diagnostic messages for a given task attempt.
+ @param taskid
+ @return the list of diagnostic messages for the task
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setNumReduceTasks"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tasks" type="int"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Set the number of reduce tasks for the job.
+ @param tasks the number of reduce tasks
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setWorkingDirectory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the current working directory for the default file system.
+ 
+ @param dir the new current working directory.
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setInputFormatClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cls" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Set the {@link InputFormat} for the job.
+ @param cls the <code>InputFormat</code> to use
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setOutputFormatClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cls" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Set the {@link OutputFormat} for the job.
+ @param cls the <code>OutputFormat</code> to use
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setMapperClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cls" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Set the {@link Mapper} for the job.
+ @param cls the <code>Mapper</code> to use
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setJarByClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cls" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the Jar by finding where a given class came from.
+ @param cls the example class]]>
+      </doc>
+    </method>
+    <method name="setJar"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jar" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the job jar]]>
+      </doc>
+    </method>
+    <method name="setUser"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the reported username for this job.
+ 
+ @param user the username for this job.]]>
+      </doc>
+    </method>
+    <method name="setCombinerClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cls" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Set the combiner class for the job.
+ @param cls the combiner to use
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setReducerClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cls" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Set the {@link Reducer} for the job.
+ @param cls the <code>Reducer</code> to use
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setPartitionerClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cls" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Set the {@link Partitioner} for the job.
+ @param cls the <code>Partitioner</code> to use
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setMapOutputKeyClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Set the key class for the map output data. This allows the user to
+ specify the map output key class to be different than the final output
+ value class.
+ 
+ @param theClass the map output key class.
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setMapOutputValueClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Set the value class for the map output data. This allows the user to
+ specify the map output value class to be different than the final output
+ value class.
+ 
+ @param theClass the map output value class.
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setOutputKeyClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Set the key class for the job output data.
+ 
+ @param theClass the key class for the job output data.
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setOutputValueClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="theClass" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Set the value class for job outputs.
+ 
+ @param theClass the value class for job outputs.
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setCombinerKeyGroupingComparatorClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cls" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Define the comparator that controls which keys are grouped together
+ for a single call to combiner,
+ {@link Reducer#reduce(Object, Iterable,
+ org.apache.hadoop.mapreduce.Reducer.Context)}
+
+ @param cls the raw comparator to use
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setSortComparatorClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cls" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Define the comparator that controls how the keys are sorted before they
+ are passed to the {@link Reducer}.
+ @param cls the raw comparator
+ @throws IllegalStateException if the job is submitted
+ @see #setCombinerKeyGroupingComparatorClass(Class)]]>
+      </doc>
+    </method>
+    <method name="setGroupingComparatorClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cls" type="java.lang.Class"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Define the comparator that controls which keys are grouped together
+ for a single call to 
+ {@link Reducer#reduce(Object, Iterable, 
+                       org.apache.hadoop.mapreduce.Reducer.Context)}
+ @param cls the raw comparator to use
+ @throws IllegalStateException if the job is submitted
+ @see #setCombinerKeyGroupingComparatorClass(Class)]]>
+      </doc>
+    </method>
+    <method name="setJobName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <exception name="IllegalStateException" type="java.lang.IllegalStateException"/>
+      <doc>
+      <![CDATA[Set the user-specified job name.
+ 
+ @param name the job's new name.
+ @throws IllegalStateException if the job is submitted]]>
+      </doc>
+    </method>
+    <method name="setSpeculativeExecution"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="speculativeExecution" type="boolean"/>
+      <doc>
+      <![CDATA[Turn speculative execution on or off for this job. 
+ 
+ @param speculativeExecution <code>true</code> if speculative execution 
+                             should be turned on, else <code>false</code>.]]>
+      </doc>
+    </method>
+    <method name="setMapSpeculativeExecution"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="speculativeExecution" type="boolean"/>
+      <doc>
+      <![CDATA[Turn speculative execution on or off for this job for map tasks. 
+ 
+ @param speculativeExecution <code>true</code> if speculative execution 
+                             should be turned on for map tasks,
+                             else <code>false</code>.]]>
+      </doc>
+    </method>
+    <method name="setReduceSpeculativeExecution"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="speculativeExecution" type="boolean"/>
+      <doc>
+      <![CDATA[Turn speculative execution on or off for this job for reduce tasks. 
+ 
+ @param speculativeExecution <code>true</code> if speculative execution 
+                             should be turned on for reduce tasks,
+                             else <code>false</code>.]]>
+      </doc>
+    </method>
+    <method name="setJobSetupCleanupNeeded"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="needed" type="boolean"/>
+      <doc>
+      <![CDATA[Specify whether job-setup and job-cleanup is needed for the job 
+ 
+ @param needed If <code>true</code>, job-setup and job-cleanup will be
+               considered from {@link OutputCommitter} 
+               else ignored.]]>
+      </doc>
+    </method>
+    <method name="setCacheArchives"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="archives" type="java.net.URI[]"/>
+      <doc>
+      <![CDATA[Set the given set of archives
+ @param archives The list of archives that need to be localized]]>
+      </doc>
+    </method>
+    <method name="setCacheFiles"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="files" type="java.net.URI[]"/>
+      <doc>
+      <![CDATA[Set the given set of files
+ @param files The list of files that need to be localized]]>
+      </doc>
+    </method>
+    <method name="addCacheArchive"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <doc>
+      <![CDATA[Add an archive to be localized
+ @param uri The uri of the cache to be localized]]>
+      </doc>
+    </method>
+    <method name="addCacheFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <doc>
+      <![CDATA[Add a file to be localized
+ @param uri The uri of the cache to be localized]]>
+      </doc>
+    </method>
+    <method name="addFileToClassPath"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add a file path to the current set of classpath entries. It adds the file
+ to cache as well.
+ 
+ Files added with this method will not be unpacked while being added to the
+ classpath.
+ To add archives to classpath, use the {@link #addArchiveToClassPath(Path)}
+ method instead.
+
+ @param file Path of the file to be added]]>
+      </doc>
+    </method>
+    <method name="addArchiveToClassPath"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="archive" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add an archive path to the current set of classpath entries. It adds the
+ archive to cache as well.
+ 
+ Archive files will be unpacked and added to the classpath
+ when being distributed.
+
+ @param archive Path of the archive to be added]]>
+      </doc>
+    </method>
+    <method name="createSymlink"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Originally intended to enable symlinks, but currently symlinks cannot be
+ disabled.]]>
+      </doc>
+    </method>
+    <method name="setMaxMapAttempts"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="n" type="int"/>
+      <doc>
+      <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ map task.
+ 
+ @param n the number of attempts per map task.]]>
+      </doc>
+    </method>
+    <method name="setMaxReduceAttempts"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="n" type="int"/>
+      <doc>
+      <![CDATA[Expert: Set the number of maximum attempts that will be made to run a
+ reduce task.
+ 
+ @param n the number of attempts per reduce task.]]>
+      </doc>
+    </method>
+    <method name="setProfileEnabled"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newValue" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the system should collect profiler information for some of 
+ the tasks in this job? The information is stored in the user log 
+ directory.
+ @param newValue true means it should be gathered]]>
+      </doc>
+    </method>
+    <method name="setProfileParams"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the profiler configuration arguments. If the string contains a '%s' it
+ will be replaced with the name of the profiling output file when the task
+ runs.
+
+ This value is passed to the task child JVM on the command line.
+
+ @param value the configuration string]]>
+      </doc>
+    </method>
+    <method name="setProfileTaskRange"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="isMap" type="boolean"/>
+      <param name="newValue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the ranges of maps or reduces to profile. setProfileEnabled(true) 
+ must also be called.
+ @param newValue a set of integer ranges of the map ids]]>
+      </doc>
+    </method>
+    <method name="setCancelDelegationTokenUponJobCompletion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[Sets the flag that will allow the JobTracker to cancel the HDFS delegation
+ tokens upon job completion. Defaults to true.]]>
+      </doc>
+    </method>
+    <method name="addFileToSharedCache" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resource" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Add a file to job config for shared cache processing. If shared cache is
+ enabled, it will return true, otherwise, return false. We don't check with
+ SCM here given application might not be able to provide the job id;
+ ClientSCMProtocol.use requires the application id. Job Submitter will read
+ the files from job config and take care of things.
+
+ @param resource The resource that Job Submitter will process later using
+          shared cache.
+ @param conf Configuration to add the resource to
+ @return whether the resource has been added to the configuration]]>
+      </doc>
+    </method>
+    <method name="addFileToSharedCacheAndClasspath" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resource" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Add a file to job config for shared cache processing. If shared cache is
+ enabled, it will return true, otherwise, return false. We don't check with
+ SCM here given application might not be able to provide the job id;
+ ClientSCMProtocol.use requires the application id. Job Submitter will read
+ the files from job config and take care of things. Job Submitter will also
+ add the file to classpath. Intended to be used by user code.
+
+ @param resource The resource that Job Submitter will process later using
+          shared cache.
+ @param conf Configuration to add the resource to
+ @return whether the resource has been added to the configuration]]>
+      </doc>
+    </method>
+    <method name="addArchiveToSharedCache" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resource" type="java.net.URI"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Add an archive to job config for shared cache processing. If shared cache
+ is enabled, it will return true, otherwise, return false. We don't check
+ with SCM here given application might not be able to provide the job id;
+ ClientSCMProtocol.use requires the application id. Job Submitter will read
+ the files from job config and take care of things. Intended to be used by
+ user code.
+
+ @param resource The resource that Job Submitter will process later using
+          shared cache.
+ @param conf Configuration to add the resource to
+ @return whether the resource has been added to the configuration]]>
+      </doc>
+    </method>
+    <method name="setFileSharedCacheUploadPolicies"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="policies" type="java.util.Map"/>
+      <doc>
+      <![CDATA[This is to set the shared cache upload policies for files. If the parameter
+ was previously set, this method will replace the old value with the new
+ provided map.
+
+ @param conf Configuration which stores the shared cache upload policies
+ @param policies A map containing the shared cache upload policies for a set
+          of resources. The key is the url of the resource and the value is
+          the upload policy. True if it should be uploaded, false otherwise.]]>
+      </doc>
+    </method>
+    <method name="setArchiveSharedCacheUploadPolicies"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="policies" type="java.util.Map"/>
+      <doc>
+      <![CDATA[This is to set the shared cache upload policies for archives. If the
+ parameter was previously set, this method will replace the old value with
+ the new provided map.
+
+ @param conf Configuration which stores the shared cache upload policies
+ @param policies A map containing the shared cache upload policies for a set
+          of resources. The key is the url of the resource and the value is
+          the upload policy. True if it should be uploaded, false otherwise.]]>
+      </doc>
+    </method>
+    <method name="getFileSharedCacheUploadPolicies" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[This is to get the shared cache upload policies for files.
+
+ @param conf Configuration which stores the shared cache upload policies
+ @return A map containing the shared cache upload policies for a set of
+         resources. The key is the url of the resource and the value is the
+         upload policy. True if it should be uploaded, false otherwise.]]>
+      </doc>
+    </method>
+    <method name="getArchiveSharedCacheUploadPolicies" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[This is to get the shared cache upload policies for archives.
+
+ @param conf Configuration which stores the shared cache upload policies
+ @return A map containing the shared cache upload policies for a set of
+         resources. The key is the url of the resource and the value is the
+         upload policy. True if it should be uploaded, false otherwise.]]>
+      </doc>
+    </method>
+    <method name="submit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <doc>
+      <![CDATA[Submit the job to the cluster and return immediately.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="waitForCompletion" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="verbose" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <doc>
+      <![CDATA[Submit the job to the cluster and wait for it to finish.
+ @param verbose print the progress to the user
+ @return true if the job succeeded
+ @throws IOException thrown if the communication with the 
+         <code>JobTracker</code> is lost]]>
+      </doc>
+    </method>
+    <method name="monitorAndPrintJob" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Monitor a job and print status in real-time as progress is made and tasks 
+ fail.
+ @return true if the job succeeded
+ @throws IOException if communication to the JobTracker fails]]>
+      </doc>
+    </method>
+    <method name="getProgressPollInterval" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[The interval at which monitorAndPrintJob() prints status]]>
+      </doc>
+    </method>
+    <method name="getCompletionPollInterval" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[The interval at which waitForCompletion() should check.]]>
+      </doc>
+    </method>
+    <method name="getTaskOutputFilter" return="org.apache.hadoop.mapreduce.Job.TaskStatusFilter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the task output filter.
+ 
+ @param conf the configuration.
+ @return the filter level.]]>
+      </doc>
+    </method>
+    <method name="setTaskOutputFilter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="newValue" type="org.apache.hadoop.mapreduce.Job.TaskStatusFilter"/>
+      <doc>
+      <![CDATA[Modify the Configuration to set the task output filter.
+ 
+ @param conf the Configuration to modify.
+ @param newValue the value to set.]]>
+      </doc>
+    </method>
+    <method name="isUber" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getReservationId" return="org.apache.hadoop.yarn.api.records.ReservationId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reservation to which the job is submitted, if any
+
+ @return the reservationId the identifier of the job's reservation, null if
+         the job does not have any reservation associated with it]]>
+      </doc>
+    </method>
+    <method name="setReservationId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationId" type="org.apache.hadoop.yarn.api.records.ReservationId"/>
+      <doc>
+      <![CDATA[Set the reservation to which the job is submitted
+
+ @param reservationId the reservationId to set]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close the <code>Job</code>.
+ @throws IOException if fail to close.]]>
+      </doc>
+    </method>
+    <field name="OUTPUT_FILTER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="COMPLETION_POLL_INTERVAL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Key in mapred-*.xml that sets completionPollIntervalMillis]]>
+      </doc>
+    </field>
+    <field name="PROGRESS_MONITOR_POLL_INTERVAL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Key in mapred-*.xml that sets progMonitorPollIntervalMillis]]>
+      </doc>
+    </field>
+    <field name="USED_GENERIC_PARSER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SUBMIT_REPLICATION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_SUBMIT_REPLICATION" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="USE_WILDCARD_FOR_LIBJARS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_USE_WILDCARD_FOR_LIBJARS" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[The job submitter's view of the Job.
+ 
+ <p>It allows the user to configure the
+ job, submit it, control its execution, and query the state. The set methods
+ only work until the job is submitted, afterwards they will throw an 
+ IllegalStateException. </p>
+ 
+ <p>
+ Normally the user creates the application, describes various facets of the
+ job via {@link Job} and then submits the job and monitor its progress.</p>
+ 
+ <p>Here is an example on how to submit a job:</p>
+ <p><blockquote><pre>
+     // Create a new Job
+     Job job = Job.getInstance();
+     job.setJarByClass(MyJob.class);
+     
+     // Specify various job-specific parameters     
+     job.setJobName("myjob");
+     
+     job.setInputPath(new Path("in"));
+     job.setOutputPath(new Path("out"));
+     
+     job.setMapperClass(MyJob.MyMapper.class);
+     job.setReducerClass(MyJob.MyReducer.class);
+
+     // Submit the job, then poll for progress until the job is complete
+     job.waitForCompletion(true);
+ </pre></blockquote>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.Job -->
+  <!-- start interface org.apache.hadoop.mapreduce.JobContext -->
+  <interface name="JobContext"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.MRJobConfig"/>
+    <method name="getConfiguration" return="org.apache.hadoop.conf.Configuration"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the configuration for the job.
+ @return the shared configuration object]]>
+      </doc>
+    </method>
+    <method name="getCredentials" return="org.apache.hadoop.security.Credentials"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get credentials for the job.
+ @return credentials for the job]]>
+      </doc>
+    </method>
+    <method name="getJobID" return="org.apache.hadoop.mapreduce.JobID"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the unique ID for the job.
+ @return the object with the job id]]>
+      </doc>
+    </method>
+    <method name="getNumReduceTasks" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the configured number of reduce tasks for this job. Defaults to 
+ <code>1</code>.
+ @return the number of reduce tasks for this job.]]>
+      </doc>
+    </method>
+    <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the current working directory for the default file system.
+ 
+ @return the directory name.]]>
+      </doc>
+    </method>
+    <method name="getOutputKeyClass" return="java.lang.Class"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the key class for the job output data.
+ @return the key class for the job output data.]]>
+      </doc>
+    </method>
+    <method name="getOutputValueClass" return="java.lang.Class"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the value class for job outputs.
+ @return the value class for job outputs.]]>
+      </doc>
+    </method>
+    <method name="getMapOutputKeyClass" return="java.lang.Class"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the key class for the map output data. If it is not set, use the
+ (final) output key class. This allows the map output key class to be
+ different than the final output key class.
+ @return the map output key class.]]>
+      </doc>
+    </method>
+    <method name="getMapOutputValueClass" return="java.lang.Class"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the value class for the map output data. If it is not set, use the
+ (final) output value class This allows the map output value class to be
+ different than the final output value class.
+  
+ @return the map output value class.]]>
+      </doc>
+    </method>
+    <method name="getJobName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user-specified job name. This is only used to identify the 
+ job to the user.
+ 
+ @return the job's name, defaulting to "".]]>
+      </doc>
+    </method>
+    <method name="getInputFormatClass" return="java.lang.Class"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <doc>
+      <![CDATA[Get the {@link InputFormat} class for the job.
+ 
+ @return the {@link InputFormat} class for the job.]]>
+      </doc>
+    </method>
+    <method name="getMapperClass" return="java.lang.Class"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <doc>
+      <![CDATA[Get the {@link Mapper} class for the job.
+ 
+ @return the {@link Mapper} class for the job.]]>
+      </doc>
+    </method>
+    <method name="getCombinerClass" return="java.lang.Class"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <doc>
+      <![CDATA[Get the combiner class for the job.
+ 
+ @return the combiner class for the job.]]>
+      </doc>
+    </method>
+    <method name="getReducerClass" return="java.lang.Class"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <doc>
+      <![CDATA[Get the {@link Reducer} class for the job.
+ 
+ @return the {@link Reducer} class for the job.]]>
+      </doc>
+    </method>
+    <method name="getOutputFormatClass" return="java.lang.Class"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <doc>
+      <![CDATA[Get the {@link OutputFormat} class for the job.
+ 
+ @return the {@link OutputFormat} class for the job.]]>
+      </doc>
+    </method>
+    <method name="getPartitionerClass" return="java.lang.Class"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <doc>
+      <![CDATA[Get the {@link Partitioner} class for the job.
+ 
+ @return the {@link Partitioner} class for the job.]]>
+      </doc>
+    </method>
+    <method name="getSortComparator" return="org.apache.hadoop.io.RawComparator"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link RawComparator} comparator used to compare keys.
+ 
+ @return the {@link RawComparator} comparator used to compare keys.]]>
+      </doc>
+    </method>
+    <method name="getJar" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the pathname of the job's jar.
+ @return the pathname]]>
+      </doc>
+    </method>
+    <method name="getCombinerKeyGroupingComparator" return="org.apache.hadoop.io.RawComparator"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user defined {@link RawComparator} comparator for
+ grouping keys of inputs to the combiner.
+
+ @return comparator set by the user for grouping values.
+ @see Job#setCombinerKeyGroupingComparatorClass(Class)]]>
+      </doc>
+    </method>
+    <method name="getGroupingComparator" return="org.apache.hadoop.io.RawComparator"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user defined {@link RawComparator} comparator for
+ grouping keys of inputs to the reduce.
+
+ @return comparator set by the user for grouping values.
+ @see Job#setGroupingComparatorClass(Class)
+ @see #getCombinerKeyGroupingComparator()]]>
+      </doc>
+    </method>
+    <method name="getJobSetupCleanupNeeded" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get whether job-setup and job-cleanup is needed for the job 
+ 
+ @return boolean]]>
+      </doc>
+    </method>
+    <method name="getTaskCleanupNeeded" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get whether task-cleanup is needed for the job 
+ 
+ @return boolean]]>
+      </doc>
+    </method>
+    <method name="getProfileEnabled" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get whether the task profiling is enabled.
+ @return true if some tasks will be profiled]]>
+      </doc>
+    </method>
+    <method name="getProfileParams" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the profiler configuration arguments.
+
+ The default value for this property is
+ "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s"
+ 
+ @return the parameters to pass to the task child to configure profiling]]>
+      </doc>
+    </method>
+    <method name="getProfileTaskRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="isMap" type="boolean"/>
+      <doc>
+      <![CDATA[Get the range of maps or reduces to profile.
+ @param isMap is the task a map?
+ @return the task ranges]]>
+      </doc>
+    </method>
+    <method name="getUser" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reported username for this job.
+ 
+ @return the username]]>
+      </doc>
+    </method>
+    <method name="getSymlink" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Originally intended to check if symlinks should be used, but currently
+ symlinks cannot be disabled.
+ @return true]]>
+      </doc>
+    </method>
+    <method name="getArchiveClassPaths" return="org.apache.hadoop.fs.Path[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the archive entries in classpath as an array of Path]]>
+      </doc>
+    </method>
+    <method name="getCacheArchives" return="java.net.URI[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get cache archives set in the Configuration
+ @return A URI array of the caches set in the Configuration
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getCacheFiles" return="java.net.URI[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get cache files set in the Configuration
+ @return A URI array of the files set in the Configuration
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getLocalCacheArchives" return="org.apache.hadoop.fs.Path[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="the array returned only includes the items that were 
+ downloaded. There is no way to map this to what is returned by
+ {@link #getCacheArchives()}.">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the path array of the localized caches
+ @return A path array of localized caches
+ @throws IOException
+ @deprecated the array returned only includes the items that were 
+ downloaded. There is no way to map this to what is returned by
+ {@link #getCacheArchives()}.]]>
+      </doc>
+    </method>
+    <method name="getLocalCacheFiles" return="org.apache.hadoop.fs.Path[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="the array returned only includes the items that were 
+ downloaded. There is no way to map this to what is returned by
+ {@link #getCacheFiles()}.">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the path array of the localized files
+ @return A path array of localized files
+ @throws IOException
+ @deprecated the array returned only includes the items that were 
+ downloaded. There is no way to map this to what is returned by
+ {@link #getCacheFiles()}.]]>
+      </doc>
+    </method>
+    <method name="getFileClassPaths" return="org.apache.hadoop.fs.Path[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the file entries in classpath as an array of Path]]>
+      </doc>
+    </method>
+    <method name="getArchiveTimestamps" return="java.lang.String[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the timestamps of the archives.  Used by internal
+ DistributedCache and MapReduce code.
+ @return a string array of timestamps]]>
+      </doc>
+    </method>
+    <method name="getFileTimestamps" return="java.lang.String[]"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the timestamps of the files.  Used by internal
+ DistributedCache and MapReduce code.
+ @return a string array of timestamps]]>
+      </doc>
+    </method>
+    <method name="getMaxMapAttempts" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the configured number of maximum attempts that will be made to run a
+ map task, as specified by the <code>mapred.map.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+  
+ @return the max number of attempts per map task.]]>
+      </doc>
+    </method>
+    <method name="getMaxReduceAttempts" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the configured number of maximum attempts  that will be made to run a
+ reduce task, as specified by the <code>mapred.reduce.max.attempts</code>
+ property. If this property is not already set, the default is 4 attempts.
+ 
+ @return the max number of attempts per reduce task.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A read-only view of the job that is provided to the tasks while they
+ are running.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.JobContext -->
+  <!-- start class org.apache.hadoop.mapreduce.JobCounter -->
+  <class name="JobCounter" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.mapreduce.JobCounter[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.mapreduce.JobCounter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.JobCounter -->
+  <!-- start class org.apache.hadoop.mapreduce.JobID -->
+  <class name="JobID" extends="org.apache.hadoop.mapred.ID"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="JobID" type="java.lang.String, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a JobID object 
+ @param jtIdentifier jobTracker identifier
+ @param id job number]]>
+      </doc>
+    </constructor>
+    <constructor name="JobID"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getJtIdentifier" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.mapreduce.ID"/>
+      <doc>
+      <![CDATA[Compare JobIds by first jtIdentifiers, then by job numbers]]>
+      </doc>
+    </method>
+    <method name="appendTo" return="java.lang.StringBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="builder" type="java.lang.StringBuilder"/>
+      <doc>
+      <![CDATA[Add the stuff after the "job" prefix to the given builder. This is useful,
+ because the sub-ids use this substring at the start of their string.
+ @param builder the builder to append to
+ @return the builder that was passed in]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="forName" return="org.apache.hadoop.mapreduce.JobID"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="str" type="java.lang.String"/>
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <doc>
+      <![CDATA[Construct a JobId object from given string 
+ @return constructed JobId object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+      </doc>
+    </method>
+    <field name="JOB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="JOBID_REGEX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="idFormat" type="java.text.NumberFormat"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[JobID represents the immutable and unique identifier for 
+ the job. JobID consists of two parts. First part 
+ represents the jobtracker identifier, so that jobID to jobtracker map 
+ is defined. For cluster setup this string is the jobtracker 
+ start time, for local setting, it is "local" and a random number.
+ Second part of the JobID is the job number. <br> 
+ An example JobID is : 
+ <code>job_200707121733_0003</code> , which represents the third job 
+ running at the jobtracker started at <code>200707121733</code>. 
+ <p>
+ Applications should never construct or parse JobID strings, but rather 
+ use appropriate constructors or {@link #forName(String)} method. 
+ 
+ @see TaskID
+ @see TaskAttemptID]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.JobID -->
+  <!-- start class org.apache.hadoop.mapreduce.JobPriority -->
+  <class name="JobPriority" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.mapreduce.JobPriority[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.mapreduce.JobPriority"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Used to describe the priority of the running job. 
+ DEFAULT : While submitting a job, if the user is not specifying priority,
+ YARN has the capability to pick the default priority as per its config.
+ Hence MapReduce can indicate such cases with this new enum.
+ UNDEFINED_PRIORITY : YARN supports priority as an integer. Hence other than
+ the five defined enums, YARN can consider other integers also. To generalize
+ such cases, this specific enum is used.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.JobPriority -->
+  <!-- start class org.apache.hadoop.mapreduce.JobStatus -->
+  <class name="JobStatus" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <implements name="java.lang.Cloneable"/>
+    <constructor name="JobStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapreduce.JobID, float, float, float, float, org.apache.hadoop.mapreduce.JobStatus.State, org.apache.hadoop.mapreduce.JobPriority, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param jobFile job configuration file.
+ @param trackingUrl link to the web-ui for details of the job.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapreduce.JobID, float, float, float, float, org.apache.hadoop.mapreduce.JobStatus.State, org.apache.hadoop.mapreduce.JobPriority, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param queue queue name
+ @param jobFile job configuration file.
+ @param trackingUrl link to the web-ui for details of the job.]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapreduce.JobID, float, float, float, float, org.apache.hadoop.mapreduce.JobStatus.State, org.apache.hadoop.mapreduce.JobPriority, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, boolean"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param queue queue name
+ @param jobFile job configuration file.
+ @param trackingUrl link to the web-ui for details of the job.
+ @param isUber Whether job running in uber mode]]>
+      </doc>
+    </constructor>
+    <constructor name="JobStatus" type="org.apache.hadoop.mapreduce.JobID, float, float, float, float, org.apache.hadoop.mapreduce.JobStatus.State, org.apache.hadoop.mapreduce.JobPriority, java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String, boolean, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a job status object for a given jobid.
+ @param jobid The jobid of the job
+ @param setupProgress The progress made on the setup
+ @param mapProgress The progress made on the maps
+ @param reduceProgress The progress made on the reduces
+ @param cleanupProgress The progress made on the cleanup
+ @param runState The current state of the job
+ @param jp Priority of the job.
+ @param user userid of the person who submitted the job.
+ @param jobName user-specified job name.
+ @param queue queue name
+ @param jobFile job configuration file.
+ @param trackingUrl link to the web-ui for details of the job.
+ @param isUber Whether job running in uber mode
+ @param historyFile history file]]>
+      </doc>
+    </constructor>
+    <method name="setMapProgress"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="p" type="float"/>
+      <doc>
+      <![CDATA[Sets the map progress of this job
+ @param p The value of map progress to set to]]>
+      </doc>
+    </method>
+    <method name="setCleanupProgress"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="p" type="float"/>
+      <doc>
+      <![CDATA[Sets the cleanup progress of this job
+ @param p The value of cleanup progress to set to]]>
+      </doc>
+    </method>
+    <method name="setSetupProgress"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="p" type="float"/>
+      <doc>
+      <![CDATA[Sets the setup progress of this job
+ @param p The value of setup progress to set to]]>
+      </doc>
+    </method>
+    <method name="setReduceProgress"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="p" type="float"/>
+      <doc>
+      <![CDATA[Sets the reduce progress of this Job
+ @param p The value of reduce progress to set to]]>
+      </doc>
+    </method>
+    <method name="setPriority"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="jp" type="org.apache.hadoop.mapreduce.JobPriority"/>
+      <doc>
+      <![CDATA[Set the priority of the job, defaulting to NORMAL.
+ @param jp new job priority]]>
+      </doc>
+    </method>
+    <method name="setFinishTime"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="finishTime" type="long"/>
+      <doc>
+      <![CDATA[Set the finish time of the job
+ @param finishTime The finishTime of the job]]>
+      </doc>
+    </method>
+    <method name="setHistoryFile"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="historyFile" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the job history file url for a completed job]]>
+      </doc>
+    </method>
+    <method name="setTrackingUrl"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="trackingUrl" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the link to the web-ui for details of the job.]]>
+      </doc>
+    </method>
+    <method name="setRetired"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Set the job retire flag to true.]]>
+      </doc>
+    </method>
+    <method name="setState"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="state" type="org.apache.hadoop.mapreduce.JobStatus.State"/>
+      <doc>
+      <![CDATA[Change the current run state of the job.]]>
+      </doc>
+    </method>
+    <method name="setStartTime"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="startTime" type="long"/>
+      <doc>
+      <![CDATA[Set the start time of the job
+ @param startTime The startTime of the job]]>
+      </doc>
+    </method>
+    <method name="setUsername"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="userName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[@param userName The username of the job]]>
+      </doc>
+    </method>
+    <method name="setSchedulingInfo"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="schedulingInfo" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Used to set the scheduling information associated to a particular Job.
+ 
+ @param schedulingInfo Scheduling information of the job]]>
+      </doc>
+    </method>
+    <method name="setJobACLs"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="acls" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set the job acls.
+ 
+ @param acls {@link Map} from {@link JobACL} to {@link AccessControlList}]]>
+      </doc>
+    </method>
+    <method name="setQueue"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="queue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set queue name
+ @param queue queue name]]>
+      </doc>
+    </method>
+    <method name="setFailureInfo"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="failureInfo" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set diagnostic information.
+ @param failureInfo diagnostic information]]>
+      </doc>
+    </method>
+    <method name="getQueue" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get queue name
+ @return queue name]]>
+      </doc>
+    </method>
+    <method name="getMapProgress" return="float"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Percentage of progress in maps]]>
+      </doc>
+    </method>
+    <method name="getCleanupProgress" return="float"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Percentage of progress in cleanup]]>
+      </doc>
+    </method>
+    <method name="getSetupProgress" return="float"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Percentage of progress in setup]]>
+      </doc>
+    </method>
+    <method name="getReduceProgress" return="float"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Percentage of progress in reduce]]>
+      </doc>
+    </method>
+    <method name="getState" return="org.apache.hadoop.mapreduce.JobStatus.State"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return running state of the job]]>
+      </doc>
+    </method>
+    <method name="getStartTime" return="long"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return start time of the job]]>
+      </doc>
+    </method>
+    <method name="clone" return="java.lang.Object"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getJobID" return="org.apache.hadoop.mapreduce.JobID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return The jobid of the Job]]>
+      </doc>
+    </method>
+    <method name="getUsername" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the username of the job]]>
+      </doc>
+    </method>
+    <method name="getSchedulingInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gets the Scheduling information associated to a particular Job.
+ @return the scheduling information of the job]]>
+      </doc>
+    </method>
+    <method name="getJobACLs" return="java.util.Map"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the job acls.
+ 
+ @return a {@link Map} from {@link JobACL} to {@link AccessControlList}]]>
+      </doc>
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.mapreduce.JobPriority"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the priority of the job
+ @return job priority]]>
+      </doc>
+    </method>
+    <method name="getFailureInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gets any available info on the reason of failure of the job.
+ @return diagnostic information on why a job might have failed.]]>
+      </doc>
+    </method>
+    <method name="isJobComplete" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns true if the status is for a completed job.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getJobName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user-specified job name.]]>
+      </doc>
+    </method>
+    <method name="getJobFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the configuration file for the job.]]>
+      </doc>
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the link to the web-ui for details of the job.]]>
+      </doc>
+    </method>
+    <method name="getFinishTime" return="long"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the finish time of the job.]]>
+      </doc>
+    </method>
+    <method name="isRetired" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Check whether the job has retired.]]>
+      </doc>
+    </method>
+    <method name="getHistoryFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the job history file name for a completed job. If job is not 
+ completed or history file not available then return null.]]>
+      </doc>
+    </method>
+    <method name="getNumUsedSlots" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return number of used mapred slots]]>
+      </doc>
+    </method>
+    <method name="setNumUsedSlots"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="n" type="int"/>
+      <doc>
+      <![CDATA[@param n number of used mapred slots]]>
+      </doc>
+    </method>
+    <method name="getNumReservedSlots" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the number of reserved slots]]>
+      </doc>
+    </method>
+    <method name="setNumReservedSlots"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="n" type="int"/>
+      <doc>
+      <![CDATA[@param n the number of reserved slots]]>
+      </doc>
+    </method>
+    <method name="getUsedMem" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the used memory]]>
+      </doc>
+    </method>
+    <method name="setUsedMem"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="m" type="int"/>
+      <doc>
+      <![CDATA[@param m the used memory]]>
+      </doc>
+    </method>
+    <method name="getReservedMem" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the reserved memory]]>
+      </doc>
+    </method>
+    <method name="setReservedMem"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="r" type="int"/>
+      <doc>
+      <![CDATA[@param r the reserved memory]]>
+      </doc>
+    </method>
+    <method name="getNeededMem" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the needed memory]]>
+      </doc>
+    </method>
+    <method name="setNeededMem"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="n" type="int"/>
+      <doc>
+      <![CDATA[@param n the needed memory]]>
+      </doc>
+    </method>
+    <method name="isUber" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether job running in uber mode
+ @return job in uber-mode]]>
+      </doc>
+    </method>
+    <method name="setUber"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="isUber" type="boolean"/>
+      <doc>
+      <![CDATA[Set uber-mode flag 
+ @param isUber Whether job running in uber-mode]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Describes the current status of a job.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.JobStatus -->
+  <!-- start interface org.apache.hadoop.mapreduce.MapContext -->
+  <interface name="MapContext"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.TaskInputOutputContext"/>
+    <method name="getInputSplit" return="org.apache.hadoop.mapreduce.InputSplit"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the input split for this map.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The context that is given to the {@link Mapper}.
+ @param <KEYIN> the key input type to the Mapper
+ @param <VALUEIN> the value input type to the Mapper
+ @param <KEYOUT> the key output type from the Mapper
+ @param <VALUEOUT> the value output type from the Mapper]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.MapContext -->
+  <!-- start class org.apache.hadoop.mapreduce.Mapper -->
+  <class name="Mapper" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Mapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Called once at the beginning of the task.]]>
+      </doc>
+    </method>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="key" type="KEYIN"/>
+      <param name="value" type="VALUEIN"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Called once for each key/value pair in the input split. Most applications
+ should override this, but the default is the identity function.]]>
+      </doc>
+    </method>
+    <method name="cleanup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Called once at the end of the task.]]>
+      </doc>
+    </method>
+    <method name="run"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Expert users can override this method for more complete control over the
+ execution of the Mapper.
+ @param context
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Maps input key/value pairs to a set of intermediate key/value pairs.  
+ 
+ <p>Maps are the individual tasks which transform input records into a 
+ intermediate records. The transformed intermediate records need not be of 
+ the same type as the input records. A given input pair may map to zero or 
+ many output pairs.</p> 
+ 
+ <p>The Hadoop Map-Reduce framework spawns one map task for each 
+ {@link InputSplit} generated by the {@link InputFormat} for the job.
+ <code>Mapper</code> implementations can access the {@link Configuration} for 
+ the job via the {@link JobContext#getConfiguration()}.
+ 
+ <p>The framework first calls 
+ {@link #setup(org.apache.hadoop.mapreduce.Mapper.Context)}, followed by
+ {@link #map(Object, Object, org.apache.hadoop.mapreduce.Mapper.Context)}
+ for each key/value pair in the <code>InputSplit</code>. Finally 
+ {@link #cleanup(org.apache.hadoop.mapreduce.Mapper.Context)} is called.</p>
+ 
+ <p>All intermediate values associated with a given output key are 
+ subsequently grouped by the framework, and passed to a {@link Reducer} to  
+ determine the final output. Users can control the sorting and grouping by 
+ specifying two key {@link RawComparator} classes.</p>
+
+ <p>The <code>Mapper</code> outputs are partitioned per 
+ <code>Reducer</code>. Users can control which keys (and hence records) go to 
+ which <code>Reducer</code> by implementing a custom {@link Partitioner}.
+ 
+ <p>Users can optionally specify a <code>combiner</code>, via 
+ {@link Job#setCombinerClass(Class)}, to perform local aggregation of the 
+ intermediate outputs, which helps to cut down the amount of data transferred 
+ from the <code>Mapper</code> to the <code>Reducer</code>.
+ 
+ <p>Applications can specify if and how the intermediate
+ outputs are to be compressed and which {@link CompressionCodec}s are to be
+ used via the <code>Configuration</code>.</p>
+  
+ <p>If the job has zero
+ reduces then the output of the <code>Mapper</code> is directly written
+ to the {@link OutputFormat} without sorting by keys.</p>
+ 
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class TokenCounterMapper 
+     extends Mapper&lt;Object, Text, Text, IntWritable&gt;{
+    
+   private final static IntWritable one = new IntWritable(1);
+   private Text word = new Text();
+   
+   public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
+     StringTokenizer itr = new StringTokenizer(value.toString());
+     while (itr.hasMoreTokens()) {
+       word.set(itr.nextToken());
+       context.write(word, one);
+     }
+   }
+ }
+ </pre></blockquote>
+
+ <p>Applications may override the
+ {@link #run(org.apache.hadoop.mapreduce.Mapper.Context)} method to exert
+ greater control on map processing e.g. multi-threaded <code>Mapper</code>s 
+ etc.</p>
+ 
+ @see InputFormat
+ @see JobContext
+ @see Partitioner  
+ @see Reducer]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.Mapper -->
+  <!-- start class org.apache.hadoop.mapreduce.MarkableIterator -->
+  <class name="MarkableIterator" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.MarkableIteratorInterface"/>
+    <constructor name="MarkableIterator" type="java.util.Iterator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new iterator layered on the input iterator
+ @param itr underlying iterator that implements MarkableIteratorInterface]]>
+      </doc>
+    </constructor>
+    <method name="mark"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="clearMark"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="hasNext" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="next" return="VALUE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="remove"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[<code>MarkableIterator</code> is a wrapper iterator class that 
+ implements the {@link MarkableIteratorInterface}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.MarkableIterator -->
+  <!-- start class org.apache.hadoop.mapreduce.OutputCommitter -->
+  <class name="OutputCommitter" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="OutputCommitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setupJob"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[For the framework to setup the job output during initialization.  This is
+ called from the application master process for the entire job. This will be
+ called multiple times, once per job attempt.
+ 
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException if temporary output could not be created]]>
+      </doc>
+    </method>
+    <method name="cleanupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #commitJob(JobContext)} and
+                 {@link #abortJob(JobContext, JobStatus.State)} instead.">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[For cleaning up the job's output after job completion.  This is called
+ from the application master process for the entire job. This may be called
+ multiple times.
+ 
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException
+ @deprecated Use {@link #commitJob(JobContext)} and
+                 {@link #abortJob(JobContext, JobStatus.State)} instead.]]>
+      </doc>
+    </method>
+    <method name="commitJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[For committing job's output after successful job completion. Note that this
+ is invoked for jobs with final runstate as SUCCESSFUL.  This is called
+ from the application master process for the entire job. This is guaranteed
+ to only be called once.  If it throws an exception the entire job will
+ fail.	
+ 
+ @param jobContext Context of the job whose output is being written.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="abortJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <param name="state" type="org.apache.hadoop.mapreduce.JobStatus.State"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[For aborting an unsuccessful job's output. Note that this is invoked for 
+ jobs with final runstate as {@link JobStatus.State#FAILED} or 
+ {@link JobStatus.State#KILLED}.  This is called from the application
+ master process for the entire job. This may be called multiple times.
+
+ @param jobContext Context of the job whose output is being written.
+ @param state final runstate of the job
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setupTask"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Sets up output for the task.  This is called from each individual task's
+ process that will output to HDFS, and it is called just for that task. This
+ may be called multiple times for the same task, but for different task
+ attempts.
+ 
+ @param taskContext Context of the task whose output is being written.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="needsTaskCommit" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Check whether task needs a commit.  This is called from each individual
+ task's process that will output to HDFS, and it is called just for that
+ task.
+ 
+ @param taskContext
+ @return true/false
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="commitTask"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[To promote the task's temporary output to final output location.
+ If {@link #needsTaskCommit(TaskAttemptContext)} returns true and this
+ task is the task that the AM determines finished first, this method
+ is called to commit an individual task's output.  This is to mark
+ that tasks output as complete, as {@link #commitJob(JobContext)} will 
+ also be called later on if the entire job finished successfully. This
+ is called from a task's process. This may be called multiple times for the
+ same task, but different task attempts.  It should be very rare for this to
+ be called multiple times and requires odd networking failures to make this
+ happen. In the future the Hadoop framework may eliminate this race.
+ 
+ @param taskContext Context of the task whose output is being written.
+ @throws IOException if commit is not successful.]]>
+      </doc>
+    </method>
+    <method name="abortTask"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Discard the task output. This is called from a task's process to clean 
+ up a single task's output that can not yet been committed. This may be
+ called multiple times for the same task, but for different task attempts.
+ 
+ @param taskContext
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #isRecoverySupported(JobContext)} instead.">
+      <doc>
+      <![CDATA[Is task output recovery supported for restarting jobs?
+ 
+ If task output recovery is supported, job restart can be done more
+ efficiently.
+ 
+ @return <code>true</code> if task output recovery is supported,
+         <code>false</code> otherwise
+ @see #recoverTask(TaskAttemptContext)
+ @deprecated Use {@link #isRecoverySupported(JobContext)} instead.]]>
+      </doc>
+    </method>
+    <method name="isCommitJobRepeatable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns true if an in-progress job commit can be retried. If the MR AM is
+ re-run then it will check this value to determine if it can retry an
+ in-progress commit that was started by a previous version.
+ Note that in rare scenarios, the previous AM version might still be running
+ at that time, due to system anomalies. Hence if this method returns true
+ then the retry commit operation should be able to run concurrently with
+ the previous operation.
+
+ If repeatable job commit is supported, job restart can tolerate previous
+ AM failures during job commit.
+
+ By default, it is not supported. Extended classes (like:
+ FileOutputCommitter) should explicitly override it if provide support.
+
+ @param jobContext
+          Context of the job whose output is being written.
+ @return <code>true</code> repeatable job commit is supported,
+         <code>false</code> otherwise
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Is task output recovery supported for restarting jobs?
+ 
+ If task output recovery is supported, job restart can be done more
+ efficiently.
+ 
+ @param jobContext
+          Context of the job whose output is being written.
+ @return <code>true</code> if task output recovery is supported,
+         <code>false</code> otherwise
+ @throws IOException
+ @see #recoverTask(TaskAttemptContext)]]>
+      </doc>
+    </method>
+    <method name="recoverTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Recover the task output. 
+ 
+ The retry-count for the job will be passed via the 
+ {@link MRJobConfig#APPLICATION_ATTEMPT_ID} key in  
+ {@link TaskAttemptContext#getConfiguration()} for the 
+ <code>OutputCommitter</code>.  This is called from the application master
+ process, but it is called individually for each task.
+ 
+ If an exception is thrown the task will be attempted again. 
+ 
+ This may be called multiple times for the same task.  But from different
+ application attempts.
+ 
+ @param taskContext Context of the task whose output is being recovered
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<code>OutputCommitter</code> describes the commit of task output for a 
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputCommitter</code> of 
+ the job to:<p>
+ <ol>
+   <li>
+   Setup the job during initialization. For example, create the temporary 
+   output directory for the job during the initialization of the job.
+   </li>
+   <li>
+   Cleanup the job after the job completion. For example, remove the
+   temporary output directory after the job completion. 
+   </li>
+   <li>
+   Setup the task temporary output.
+   </li> 
+   <li>
+   Check whether a task needs a commit. This is to avoid the commit
+   procedure if a task does not need commit.
+   </li>
+   <li>
+   Commit of the task output.
+   </li>  
+   <li>
+   Discard the task commit.
+   </li>
+ </ol>
+ The methods in this class can be called from several different processes and
+ from several different contexts.  It is important to know which process and
+ which context each is called from.  Each method should be marked accordingly
+ in its documentation.  It is also important to note that not all methods are
+ guaranteed to be called once and only once.  If a method is not guaranteed to
+ have this property the output committer needs to handle this appropriately. 
+ Also note it will only be in rare situations where they may be called 
+ multiple times for the same task.
+ 
+ @see org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter 
+ @see JobContext
+ @see TaskAttemptContext]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.OutputCommitter -->
+  <!-- start class org.apache.hadoop.mapreduce.OutputFormat -->
+  <class name="OutputFormat" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="OutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the {@link RecordWriter} for the given task.
+
+ @param context the information about the current task.
+ @return a {@link RecordWriter} to write the output for the job.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Check for validity of the output-specification for the job.
+  
+ <p>This is to validate the output specification for the job when
+ a job is submitted.  Typically checks that it does not already exist,
+ throwing an exception when it already exists, so that output is not
+ overwritten.</p>
+
+ Implementations which write to filesystems which support delegation
+ tokens usually collect the tokens for the destination path(s)
+ and attach them to the job context's JobConf.
+ @param context information about the job
+ @throws IOException when output should not be attempted]]>
+      </doc>
+    </method>
+    <method name="getOutputCommitter" return="org.apache.hadoop.mapreduce.OutputCommitter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the output committer for this output format. This is responsible
+ for ensuring the output is committed correctly.
+ @param context the task context
+ @return an output committer
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<code>OutputFormat</code> describes the output-specification for a 
+ Map-Reduce job.
+
+ <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the
+ job to:</p>
+ <ol>
+   <li>
+   Validate the output-specification of the job. For e.g. check that the 
+   output directory doesn't already exist. 
+   </li>
+   <li>
+   Provide the {@link RecordWriter} implementation to be used to write out
+   the output files of the job. Output files are stored in a 
+   {@link FileSystem}.
+   </li>
+ </ol>
+ 
+ @see RecordWriter]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.OutputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.Partitioner -->
+  <class name="Partitioner" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Partitioner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getPartition" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="KEY"/>
+      <param name="value" type="VALUE"/>
+      <param name="numPartitions" type="int"/>
+      <doc>
+      <![CDATA[Get the partition number for a given key (hence record) given the total 
+ number of partitions i.e. number of reduce-tasks for the job.
+   
+ <p>Typically a hash function on all or a subset of the key.</p>
+
+ @param key the key to be partitioned.
+ @param value the entry value.
+ @param numPartitions the total number of partitions.
+ @return the partition number for the <code>key</code>.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Partitions the key space.
+ 
+ <p><code>Partitioner</code> controls the partitioning of the keys of the 
+ intermediate map-outputs. The key (or a subset of the key) is used to derive
+ the partition, typically by a hash function. The total number of partitions
+ is the same as the number of reduce tasks for the job. Hence this controls
+ which of the <code>m</code> reduce tasks the intermediate key (and hence the 
+ record) is sent for reduction.</p>
+
+ <p>Note: A <code>Partitioner</code> is created only when there are multiple
+ reducers.</p>
+
+ <p>Note: If you require your Partitioner class to obtain the Job's
+ configuration object, implement the {@link Configurable} interface.</p>
+ 
+ @see Reducer]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.Partitioner -->
+  <!-- start class org.apache.hadoop.mapreduce.QueueAclsInfo -->
+  <class name="QueueAclsInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="QueueAclsInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor for QueueAclsInfo.]]>
+      </doc>
+    </constructor>
+    <constructor name="QueueAclsInfo" type="java.lang.String, java.lang.String[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a new QueueAclsInfo object using the queue name and the
+ queue operations array
+ 
+ @param queueName Name of the job queue
+ @param operations]]>
+      </doc>
+    </constructor>
+    <method name="getQueueName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get queue name.
+ 
+ @return name]]>
+      </doc>
+    </method>
+    <method name="setQueueName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+    </method>
+    <method name="getOperations" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get operations allowed on queue.
+ 
+ @return array of String]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Class to encapsulate Queue ACLs for a particular
+  user.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.QueueAclsInfo -->
+  <!-- start class org.apache.hadoop.mapreduce.QueueInfo -->
+  <class name="QueueInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="QueueInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor for QueueInfo.]]>
+      </doc>
+    </constructor>
+    <constructor name="QueueInfo" type="java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a new QueueInfo object using the queue name and the
+ scheduling information passed.
+ 
+ @param queueName Name of the job queue
+ @param schedulingInfo Scheduling Information associated with the job
+ queue]]>
+      </doc>
+    </constructor>
+    <constructor name="QueueInfo" type="java.lang.String, java.lang.String, org.apache.hadoop.mapreduce.QueueState, org.apache.hadoop.mapreduce.JobStatus[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@param queueName
+ @param schedulingInfo
+ @param state
+ @param stats]]>
+      </doc>
+    </constructor>
+    <method name="setQueueName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the queue name of the JobQueueInfo
+ 
+ @param queueName Name of the job queue.]]>
+      </doc>
+    </method>
+    <method name="getQueueName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the queue name from JobQueueInfo
+ 
+ @return queue name]]>
+      </doc>
+    </method>
+    <method name="setSchedulingInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="schedulingInfo" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the scheduling information associated to particular job queue
+ 
+ @param schedulingInfo]]>
+      </doc>
+    </method>
+    <method name="getSchedulingInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gets the scheduling information associated to particular job queue.
+ If nothing is set would return <b>"N/A"</b>
+ 
+ @return Scheduling information associated to particular Job Queue]]>
+      </doc>
+    </method>
+    <method name="setState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="state" type="org.apache.hadoop.mapreduce.QueueState"/>
+      <doc>
+      <![CDATA[Set the state of the queue
+ @param state state of the queue.]]>
+      </doc>
+    </method>
+    <method name="getState" return="org.apache.hadoop.mapreduce.QueueState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the queue state
+ @return the queue state.]]>
+      </doc>
+    </method>
+    <method name="setJobStatuses"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="stats" type="org.apache.hadoop.mapreduce.JobStatus[]"/>
+    </method>
+    <method name="getQueueChildren" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get immediate children.
+ 
+ @return list of QueueInfo]]>
+      </doc>
+    </method>
+    <method name="setQueueChildren"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="children" type="java.util.List"/>
+    </method>
+    <method name="getProperties" return="java.util.Properties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get properties.
+ 
+ @return Properties]]>
+      </doc>
+    </method>
+    <method name="setProperties"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="props" type="java.util.Properties"/>
+    </method>
+    <method name="getJobStatuses" return="org.apache.hadoop.mapreduce.JobStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the jobs submitted to queue
+ @return list of JobStatus for the submitted jobs]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Class that contains the information regarding the Job Queues which are 
+ maintained by the Hadoop Map/Reduce framework.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.QueueInfo -->
+  <!-- start class org.apache.hadoop.mapreduce.QueueState -->
+  <class name="QueueState" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.mapreduce.QueueState[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.mapreduce.QueueState"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="getStateName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the stateName]]>
+      </doc>
+    </method>
+    <method name="getState" return="org.apache.hadoop.mapreduce.QueueState"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="state" type="java.lang.String"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Enum representing queue state]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.QueueState -->
+  <!-- start class org.apache.hadoop.mapreduce.RecordReader -->
+  <class name="RecordReader" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Closeable"/>
+    <constructor name="RecordReader"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="initialize"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Called once at initialization.
+ @param split the split that defines the range of records to read
+ @param context the information about the task
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="nextKeyValue" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Read the next key, value pair.
+ @return true if a key/value pair was read
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getCurrentKey" return="KEYIN"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the current key
+ @return the current key or null if there is no current key
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getCurrentValue" return="VALUEIN"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the current value.
+ @return the object that was read
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[The current progress of the record reader through its data.
+ @return a number between 0.0 and 1.0 that is the fraction of the data read
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close the record reader.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The record reader breaks the data into key/value pairs for input to the
+ {@link Mapper}.
+ @param <KEYIN>
+ @param <VALUEIN>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.RecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.RecordWriter -->
+  <class name="RecordWriter" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RecordWriter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="write"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Writes a key/value pair.
+
+ @param key the key to write.
+ @param value the value to write.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Close this <code>RecordWriter</code> to future operations.
+ 
+ @param context the context of the task
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<code>RecordWriter</code> writes the output &lt;key, value&gt; pairs 
+ to an output file.
+ 
+ <p><code>RecordWriter</code> implementations write the job outputs to the
+ {@link FileSystem}.
+ 
+ @see OutputFormat]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.RecordWriter -->
+  <!-- start interface org.apache.hadoop.mapreduce.ReduceContext -->
+  <interface name="ReduceContext"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.TaskInputOutputContext"/>
+    <method name="nextKey" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Start processing next unique key.]]>
+      </doc>
+    </method>
+    <method name="getValues" return="java.lang.Iterable"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Iterate through the values for the current key, reusing the same value 
+ object, which is stored in the context.
+ @return the series of values associated with the current key. All of the 
+ objects returned directly and indirectly from this method are reused.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The context passed to the {@link Reducer}.
+ @param <KEYIN> the class of the input keys
+ @param <VALUEIN> the class of the input values
+ @param <KEYOUT> the class of the output keys
+ @param <VALUEOUT> the class of the output values]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.ReduceContext -->
+  <!-- start class org.apache.hadoop.mapreduce.Reducer -->
+  <class name="Reducer" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Reducer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Called once at the start of the task.]]>
+      </doc>
+    </method>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="key" type="KEYIN"/>
+      <param name="values" type="java.lang.Iterable"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[This method is called once for each key. Most applications will define
+ their reduce class by overriding this method. The default implementation
+ is an identity function.]]>
+      </doc>
+    </method>
+    <method name="cleanup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Called once at the end of the task.]]>
+      </doc>
+    </method>
+    <method name="run"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Advanced application writers can use the 
+ {@link #run(org.apache.hadoop.mapreduce.Reducer.Context)} method to
+ control how the reduce task works.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Reduces a set of intermediate values which share a key to a smaller set of
+ values.  
+ 
+ <p><code>Reducer</code> implementations 
+ can access the {@link Configuration} for the job via the 
+ {@link JobContext#getConfiguration()} method.</p>
+
+ <p><code>Reducer</code> has 3 primary phases:</p>
+ <ol>
+   <li>
+   
+   <b id="Shuffle">Shuffle</b>
+   
+   <p>The <code>Reducer</code> copies the sorted output from each 
+   {@link Mapper} using HTTP across the network.</p>
+   </li>
+   
+   <li>
+   <b id="Sort">Sort</b>
+   
+   <p>The framework merge sorts <code>Reducer</code> inputs by 
+   <code>key</code>s 
+   (since different <code>Mapper</code>s may have output the same key).</p>
+   
+   <p>The shuffle and sort phases occur simultaneously i.e. while outputs are
+   being fetched they are merged.</p>
+      
+   <b id="SecondarySort">SecondarySort</b>
+   
+   <p>To achieve a secondary sort on the values returned by the value 
+   iterator, the application should extend the key with the secondary
+   key and define a grouping comparator. The keys will be sorted using the
+   entire key, but will be grouped using the grouping comparator to decide
+   which keys and values are sent in the same call to reduce. The grouping 
+   comparator is specified via 
+   {@link Job#setGroupingComparatorClass(Class)}. The sort order is
+   controlled by 
+   {@link Job#setSortComparatorClass(Class)}.</p>
+   
+   
+   For example, say that you want to find duplicate web pages and tag them 
+   all with the url of the "best" known example. You would set up the job 
+   like:
+   <ul>
+     <li>Map Input Key: url</li>
+     <li>Map Input Value: document</li>
+     <li>Map Output Key: document checksum, url pagerank</li>
+     <li>Map Output Value: url</li>
+     <li>Partitioner: by checksum</li>
+     <li>OutputKeyComparator: by checksum and then decreasing pagerank</li>
+     <li>OutputValueGroupingComparator: by checksum</li>
+   </ul>
+   </li>
+   
+   <li>   
+   <b id="Reduce">Reduce</b>
+   
+   <p>In this phase the 
+   {@link #reduce(Object, Iterable, org.apache.hadoop.mapreduce.Reducer.Context)}
+   method is called for each <code>&lt;key, (collection of values)&gt;</code> in
+   the sorted inputs.</p>
+   <p>The output of the reduce task is typically written to a 
+   {@link RecordWriter} via 
+   {@link Context#write(Object, Object)}.</p>
+   </li>
+ </ol>
+ 
+ <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p>
+ 
+ <p>Example:</p>
+ <p><blockquote><pre>
+ public class IntSumReducer&lt;Key&gt; extends Reducer&lt;Key,IntWritable,
+                                                 Key,IntWritable&gt; {
+   private IntWritable result = new IntWritable();
+ 
+   public void reduce(Key key, Iterable&lt;IntWritable&gt; values,
+                      Context context) throws IOException, InterruptedException {
+     int sum = 0;
+     for (IntWritable val : values) {
+       sum += val.get();
+     }
+     result.set(sum);
+     context.write(key, result);
+   }
+ }
+ </pre></blockquote>
+ 
+ @see Mapper
+ @see Partitioner]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.Reducer -->
+  <!-- start interface org.apache.hadoop.mapreduce.TaskAttemptContext -->
+  <interface name="TaskAttemptContext"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.JobContext"/>
+    <implements name="org.apache.hadoop.util.Progressable"/>
+    <method name="getTaskAttemptID" return="org.apache.hadoop.mapreduce.TaskAttemptID"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the unique name for this task attempt.]]>
+      </doc>
+    </method>
+    <method name="setStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="msg" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the current status of the task to the given string.]]>
+      </doc>
+    </method>
+    <method name="getStatus" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the last set status message.
+ @return the current status message]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The current progress of the task attempt.
+ @return a number between 0.0 and 1.0 (inclusive) indicating the attempt's
+ progress.]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="org.apache.hadoop.mapreduce.Counter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.Enum"/>
+      <doc>
+      <![CDATA[Get the {@link Counter} for the given <code>counterName</code>.
+ @param counterName counter name
+ @return the <code>Counter</code> for the given <code>counterName</code>]]>
+      </doc>
+    </method>
+    <method name="getCounter" return="org.apache.hadoop.mapreduce.Counter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="groupName" type="java.lang.String"/>
+      <param name="counterName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the {@link Counter} for the given <code>groupName</code> and 
+ <code>counterName</code>.
+ @param groupName counter group name
+ @param counterName counter name
+ @return the <code>Counter</code> for the given <code>groupName</code> and 
+         <code>counterName</code>]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The context for task attempts.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.TaskAttemptContext -->
+  <!-- start class org.apache.hadoop.mapreduce.TaskAttemptID -->
+  <class name="TaskAttemptID" extends="org.apache.hadoop.mapred.ID"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TaskAttemptID" type="org.apache.hadoop.mapreduce.TaskID, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a TaskAttemptID object from given {@link TaskID}.  
+ @param taskId TaskID that this task belongs to  
+ @param id the task attempt number]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskAttemptID" type="java.lang.String, int, org.apache.hadoop.mapreduce.TaskType, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a TaskId object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number 
+ @param type the TaskType 
+ @param taskId taskId number
+ @param id the task attempt number]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskAttemptID" type="java.lang.String, int, boolean, int, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a TaskId object from given parts.
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param taskId taskId number
+ @param id the task attempt number]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskAttemptID"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getJobID" return="org.apache.hadoop.mapreduce.JobID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the {@link JobID} object that this task attempt belongs to]]>
+      </doc>
+    </method>
+    <method name="getTaskID" return="org.apache.hadoop.mapreduce.TaskID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the {@link TaskID} object that this task attempt belongs to]]>
+      </doc>
+    </method>
+    <method name="isMap" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns whether this TaskID is a map ID]]>
+      </doc>
+    </method>
+    <method name="getTaskType" return="org.apache.hadoop.mapreduce.TaskType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the TaskType of the TaskAttemptID]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="appendTo" return="java.lang.StringBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="builder" type="java.lang.StringBuilder"/>
+      <doc>
+      <![CDATA[Add the unique string to the StringBuilder
+ @param builder the builder to append ot
+ @return the builder that was passed in.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.mapreduce.ID"/>
+      <doc>
+      <![CDATA[Compare TaskIds by first tipIds, then by task numbers.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="forName" return="org.apache.hadoop.mapreduce.TaskAttemptID"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="str" type="java.lang.String"/>
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <doc>
+      <![CDATA[Construct a TaskAttemptID object from given string 
+ @return constructed TaskAttemptID object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+      </doc>
+    </method>
+    <field name="ATTEMPT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[TaskAttemptID represents the immutable and unique identifier for 
+ a task attempt. Each task attempt is one particular instance of a Map or
+ Reduce Task identified by its TaskID. 
+ 
+ TaskAttemptID consists of 2 parts. First part is the 
+ {@link TaskID}, that this TaskAttemptID belongs to.
+ Second part is the task attempt number. <br> 
+ An example TaskAttemptID is : 
+ <code>attempt_200707121733_0003_m_000005_0</code> , which represents the
+ zeroth task attempt for the fifth map task in the third job 
+ running at the jobtracker started at <code>200707121733</code>.
+ <p>
+ Applications should never construct or parse TaskAttemptID strings
+ , but rather use appropriate constructors or {@link #forName(String)} 
+ method. 
+ 
+ @see JobID
+ @see TaskID]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.TaskAttemptID -->
+  <!-- start class org.apache.hadoop.mapreduce.TaskCompletionEvent -->
+  <class name="TaskCompletionEvent" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="TaskCompletionEvent"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor for Writable.]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskCompletionEvent" type="int, org.apache.hadoop.mapreduce.TaskAttemptID, int, boolean, org.apache.hadoop.mapreduce.TaskCompletionEvent.Status, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor. eventId should be created externally and incremented
+ per event for each job. 
+ @param eventId event id, event id should be unique and assigned in
+  incrementally, starting from 0. 
+ @param taskId task id
+ @param status task's status 
+ @param taskTrackerHttp task tracker's host:port for http.]]>
+      </doc>
+    </constructor>
+    <method name="getEventId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns event Id. 
+ @return event id]]>
+      </doc>
+    </method>
+    <method name="getTaskAttemptId" return="org.apache.hadoop.mapreduce.TaskAttemptID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns task id. 
+ @return task id]]>
+      </doc>
+    </method>
+    <method name="getStatus" return="org.apache.hadoop.mapreduce.TaskCompletionEvent.Status"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns {@link Status}
+ @return task completion status]]>
+      </doc>
+    </method>
+    <method name="getTaskTrackerHttp" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[http location of the tasktracker where this task ran. 
+ @return http location of tasktracker user logs]]>
+      </doc>
+    </method>
+    <method name="getTaskRunTime" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns time (in millisec) the task took to complete.]]>
+      </doc>
+    </method>
+    <method name="setTaskRunTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="taskCompletionTime" type="int"/>
+      <doc>
+      <![CDATA[Set the task completion time
+ @param taskCompletionTime time (in millisec) the task took to complete]]>
+      </doc>
+    </method>
+    <method name="setEventId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="eventId" type="int"/>
+      <doc>
+      <![CDATA[set event Id. should be assigned incrementally starting from 0. 
+ @param eventId]]>
+      </doc>
+    </method>
+    <method name="setTaskAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="taskId" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
+      <doc>
+      <![CDATA[Sets task id. 
+ @param taskId]]>
+      </doc>
+    </method>
+    <method name="setTaskStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="status" type="org.apache.hadoop.mapreduce.TaskCompletionEvent.Status"/>
+      <doc>
+      <![CDATA[Set task status. 
+ @param status]]>
+      </doc>
+    </method>
+    <method name="setTaskTrackerHttp"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="taskTrackerHttp" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set task tracker http location. 
+ @param taskTrackerHttp]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isMapTask" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="idWithinJob" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="EMPTY_ARRAY" type="org.apache.hadoop.mapreduce.TaskCompletionEvent[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This is used to track task completion events on 
+ job tracker.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.TaskCompletionEvent -->
+  <!-- start class org.apache.hadoop.mapreduce.TaskCompletionEvent.Status -->
+  <class name="TaskCompletionEvent.Status" extends="java.lang.Enum"
+    abstract="false"
+    static="true" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.mapreduce.TaskCompletionEvent.Status[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.mapreduce.TaskCompletionEvent.Status"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.TaskCompletionEvent.Status -->
+  <!-- start class org.apache.hadoop.mapreduce.TaskCounter -->
+  <class name="TaskCounter" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.mapreduce.TaskCounter[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.mapreduce.TaskCounter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.TaskCounter -->
+  <!-- start class org.apache.hadoop.mapreduce.TaskID -->
+  <class name="TaskID" extends="org.apache.hadoop.mapred.ID"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TaskID" type="org.apache.hadoop.mapreduce.JobID, org.apache.hadoop.mapreduce.TaskType, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a TaskID object from given {@link JobID}.
+
+ @param jobId JobID that this tip belongs to 
+ @param type the {@link TaskType} of the task 
+ @param id the tip number]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskID" type="java.lang.String, int, org.apache.hadoop.mapreduce.TaskType, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a TaskInProgressId object from given parts.
+
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number 
+ @param type the TaskType 
+ @param id the tip number]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskID" type="org.apache.hadoop.mapreduce.JobID, boolean, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a TaskID object from given {@link JobID}.
+
+ @param jobId JobID that this tip belongs to
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskID" type="java.lang.String, int, boolean, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a TaskInProgressId object from given parts.
+
+ @param jtIdentifier jobTracker identifier
+ @param jobId job number
+ @param isMap whether the tip is a map
+ @param id the tip number]]>
+      </doc>
+    </constructor>
+    <constructor name="TaskID"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor for Writable. Sets the task type to
+ {@link TaskType#REDUCE}, the ID to 0, and the job ID to an empty job ID.]]>
+      </doc>
+    </constructor>
+    <method name="getJobID" return="org.apache.hadoop.mapreduce.JobID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the {@link JobID} object that this tip belongs to.
+
+ @return the JobID object]]>
+      </doc>
+    </method>
+    <method name="isMap" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns whether this TaskID is a map ID.
+
+ @return whether this TaskID is a map ID]]>
+      </doc>
+    </method>
+    <method name="getTaskType" return="org.apache.hadoop.mapreduce.TaskType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the type of the task.
+
+ @return the type of the task]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="org.apache.hadoop.mapreduce.ID"/>
+      <doc>
+      <![CDATA[Compare TaskInProgressIds by first jobIds, then by tip numbers.
+ Reducers are defined as greater than mappers.
+
+ @param o the TaskID against which to compare
+ @return 0 if equal, positive if this TaskID is greater, and negative if
+ this TaskID is less]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="appendTo" return="java.lang.StringBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="builder" type="java.lang.StringBuilder"/>
+      <doc>
+      <![CDATA[Add the unique string to the given builder.
+
+ @param builder the builder to append to
+ @return the builder that was passed in]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="forName" return="org.apache.hadoop.mapreduce.TaskID"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="str" type="java.lang.String"/>
+      <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/>
+      <doc>
+      <![CDATA[Construct a TaskID object from given string.
+
+ @param str the target string
+ @return constructed TaskID object or null if the given String is null
+ @throws IllegalArgumentException if the given string is malformed]]>
+      </doc>
+    </method>
+    <method name="getRepresentingCharacter" return="char"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="org.apache.hadoop.mapreduce.TaskType"/>
+      <doc>
+      <![CDATA[Gets the character representing the {@link TaskType}.
+
+ @param type the TaskType
+ @return the character]]>
+      </doc>
+    </method>
+    <method name="getTaskType" return="org.apache.hadoop.mapreduce.TaskType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="c" type="char"/>
+      <doc>
+      <![CDATA[Gets the {@link TaskType} corresponding to the character.
+
+ @param c the character
+ @return the TaskType]]>
+      </doc>
+    </method>
+    <method name="getAllTaskTypes" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns a string of characters describing all possible {@link TaskType}
+ values
+
+ @return a string of all task type characters]]>
+      </doc>
+    </method>
+    <field name="TASK" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="idFormat" type="java.text.NumberFormat"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="TASK_ID_REGEX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="taskIdPattern" type="java.util.regex.Pattern"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[TaskID represents the immutable and unique identifier for 
+ a Map or Reduce Task. Each TaskID encompasses multiple attempts made to
+ execute the Map or Reduce Task, each of which are uniquely indentified by
+ their TaskAttemptID.
+ 
+ TaskID consists of 3 parts. First part is the {@link JobID}, that this 
+ TaskInProgress belongs to. Second part of the TaskID is either 'm' or 'r' 
+ representing whether the task is a map task or a reduce task. 
+ And the third part is the task number. <br> 
+ An example TaskID is : 
+ <code>task_200707121733_0003_m_000005</code> , which represents the
+ fifth map task in the third job running at the jobtracker 
+ started at <code>200707121733</code>. 
+ <p>
+ Applications should never construct or parse TaskID strings
+ , but rather use appropriate constructors or {@link #forName(String)} 
+ method. 
+ 
+ @see JobID
+ @see TaskAttemptID]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.TaskID -->
+  <!-- start interface org.apache.hadoop.mapreduce.TaskInputOutputContext -->
+  <interface name="TaskInputOutputContext"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+    <method name="nextKeyValue" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Advance to the next key, value pair, returning null if at end.
+ @return the key object that was read into, or null if no more]]>
+      </doc>
+    </method>
+    <method name="getCurrentKey" return="KEYIN"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the current key.
+ @return the current key object or null if there isn't one
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="getCurrentValue" return="VALUEIN"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the current value.
+ @return the value object that was read into
+ @throws IOException
+ @throws InterruptedException]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="KEYOUT"/>
+      <param name="value" type="VALUEOUT"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Generate an output key/value pair.]]>
+      </doc>
+    </method>
+    <method name="getOutputCommitter" return="org.apache.hadoop.mapreduce.OutputCommitter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link OutputCommitter} for the task-attempt.
+ @return the <code>OutputCommitter</code> for the task-attempt]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A context object that allows input and output from the task. It is only
+ supplied to the {@link Mapper} or {@link Reducer}.
+ @param <KEYIN> the input key type for the task
+ @param <VALUEIN> the input value type for the task
+ @param <KEYOUT> the output key type for the task
+ @param <VALUEOUT> the output value type for the task]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.TaskInputOutputContext -->
+  <!-- start class org.apache.hadoop.mapreduce.TaskTrackerInfo -->
+  <class name="TaskTrackerInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="TaskTrackerInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="TaskTrackerInfo" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="TaskTrackerInfo" type="java.lang.String, java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTaskTrackerName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gets the tasktracker's name.
+ 
+ @return tracker's name.]]>
+      </doc>
+    </method>
+    <method name="isBlacklisted" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether tracker is blacklisted
+ @return true if tracker is blacklisted
+         false otherwise]]>
+      </doc>
+    </method>
+    <method name="getReasonForBlacklist" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gets the reason for which the tasktracker was blacklisted.
+ 
+ @return reason which tracker was blacklisted]]>
+      </doc>
+    </method>
+    <method name="getBlacklistReport" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Gets a descriptive report about why the tasktracker was blacklisted.
+ 
+ @return report describing why the tasktracker was blacklisted.]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Information about TaskTracker.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.TaskTrackerInfo -->
+  <!-- start class org.apache.hadoop.mapreduce.TaskType -->
+  <class name="TaskType" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.mapreduce.TaskType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.mapreduce.TaskType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Enum for map, reduce, job-setup, job-cleanup, task-cleanup task types.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.TaskType -->
+</package>
+<package name="org.apache.hadoop.mapreduce.checkpoint">
+</package>
+<package name="org.apache.hadoop.mapreduce.counters">
+  <!-- start class org.apache.hadoop.mapreduce.counters.AbstractCounters -->
+  <class name="AbstractCounters" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <implements name="java.lang.Iterable"/>
+    <constructor name="AbstractCounters" type="org.apache.hadoop.mapreduce.counters.CounterGroupFactory"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AbstractCounters" type="org.apache.hadoop.mapreduce.counters.AbstractCounters, org.apache.hadoop.mapreduce.counters.CounterGroupFactory"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct from another counters object.
+ @param <C1> type of the other counter
+ @param <G1> type of the other counter group
+ @param counters the counters object to copy
+ @param groupFactory the factory for new groups]]>
+      </doc>
+    </constructor>
+    <method name="findCounter" return="C"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="groupName" type="java.lang.String"/>
+      <param name="counterName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Find a counter, create one if necessary
+ @param groupName of the counter
+ @param counterName name of the counter
+ @return the matching counter]]>
+      </doc>
+    </method>
+    <method name="findCounter" return="C"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Enum"/>
+      <doc>
+      <![CDATA[Find the counter for the given enum. The same enum will always return the
+ same counter.
+ @param key the counter key
+ @return the matching counter object]]>
+      </doc>
+    </method>
+    <method name="getGroupNames" return="java.lang.Iterable"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the names of all counter classes.
+ @return Set of counter names.]]>
+      </doc>
+    </method>
+    <method name="iterator" return="java.util.Iterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getGroup" return="G"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="groupName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns the named counter group, or an empty group if there is none
+ with the specified name.
+ @param groupName name of the group
+ @return the group]]>
+      </doc>
+    </method>
+    <method name="countCounters" return="int"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the total number of counters, by summing the number of counters
+ in each group.
+ @return the total number of counters]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write the set of groups.
+ Counters ::= version #fgroups (groupId, group)* #groups (group)*]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return textual representation of the counter values.
+ @return the string]]>
+      </doc>
+    </method>
+    <method name="incrAllCounters"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.mapreduce.counters.AbstractCounters"/>
+      <doc>
+      <![CDATA[Increments multiple counters by their amounts in another Counters
+ instance.
+ @param other the other Counters instance]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericRight" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An abstract class to provide common implementation for the Counters
+ container in both mapred and mapreduce packages.
+
+ @param <C> type of counter inside the counters
+ @param <G> type of group inside the counters]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.counters.AbstractCounters -->
+  <!-- start interface org.apache.hadoop.mapreduce.counters.CounterGroupBase -->
+  <interface name="CounterGroupBase"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <implements name="java.lang.Iterable"/>
+    <method name="getName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the internal name of the group
+ @return the internal name]]>
+      </doc>
+    </method>
+    <method name="getDisplayName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the display name of the group.
+ @return the human readable name]]>
+      </doc>
+    </method>
+    <method name="setDisplayName"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="displayName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the display name of the group
+ @param displayName of the group]]>
+      </doc>
+    </method>
+    <method name="addCounter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counter" type="T"/>
+      <doc>
+      <![CDATA[Add a counter to this group.
+ @param counter to add]]>
+      </doc>
+    </method>
+    <method name="addCounter" return="T"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="displayName" type="java.lang.String"/>
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Add a counter to this group
+ @param name  of the counter
+ @param displayName of the counter
+ @param value of the counter
+ @return the counter]]>
+      </doc>
+    </method>
+    <method name="findCounter" return="T"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+      <param name="displayName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Find a counter in the group.
+ @param counterName the name of the counter
+ @param displayName the display name of the counter
+ @return the counter that was found or added]]>
+      </doc>
+    </method>
+    <method name="findCounter" return="T"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+      <param name="create" type="boolean"/>
+      <doc>
+      <![CDATA[Find a counter in the group
+ @param counterName the name of the counter
+ @param create create the counter if not found if true
+ @return the counter that was found or added or null if create is false]]>
+      </doc>
+    </method>
+    <method name="findCounter" return="T"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="counterName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Find a counter in the group.
+ @param counterName the name of the counter
+ @return the counter that was found or added]]>
+      </doc>
+    </method>
+    <method name="size" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the number of counters in this group.]]>
+      </doc>
+    </method>
+    <method name="incrAllCounters"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="rightGroup" type="org.apache.hadoop.mapreduce.counters.CounterGroupBase"/>
+      <doc>
+      <![CDATA[Increment all counters by a group of counters
+ @param rightGroup  the group to be added to this group]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The common counter group interface.
+
+ @param <T> type of the counter for the group]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.counters.CounterGroupBase -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.aggregate">
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.DoubleValueSum -->
+  <class name="DoubleValueSum" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator"/>
+    <constructor name="DoubleValueSum"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default constructor]]>
+      </doc>
+    </constructor>
+    <method name="addNextValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[add a value to the aggregator
+ 
+ @param val
+          an object whose string representation represents a double value.]]>
+      </doc>
+    </method>
+    <method name="addNextValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="double"/>
+      <doc>
+      <![CDATA[add a value to the aggregator
+ 
+ @param val
+          a double value.]]>
+      </doc>
+    </method>
+    <method name="getReport" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the string representation of the aggregated value]]>
+      </doc>
+    </method>
+    <method name="getSum" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the aggregated value]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[reset the aggregator]]>
+      </doc>
+    </method>
+    <method name="getCombinerOutput" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return return an array of one element. The element is a string
+         representation of the aggregated value. The return value is
+         expected to be used by the a combiner.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements a value aggregator that sums up a sequence of double
+ values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.DoubleValueSum -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.LongValueMax -->
+  <class name="LongValueMax" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator"/>
+    <constructor name="LongValueMax"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[the default constructor]]>
+      </doc>
+    </constructor>
+    <method name="addNextValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[add a value to the aggregator
+ 
+ @param val
+          an object whose string representation represents a long value.]]>
+      </doc>
+    </method>
+    <method name="addNextValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newVal" type="long"/>
+      <doc>
+      <![CDATA[add a value to the aggregator
+ 
+ @param newVal
+          a long value.]]>
+      </doc>
+    </method>
+    <method name="getVal" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the aggregated value]]>
+      </doc>
+    </method>
+    <method name="getReport" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the string representation of the aggregated value]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[reset the aggregator]]>
+      </doc>
+    </method>
+    <method name="getCombinerOutput" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return return an array of one element. The element is a string
+         representation of the aggregated value. The return value is
+         expected to be used by the a combiner.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements a value aggregator that maintain the maximum of 
+ a sequence of long values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.LongValueMax -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.LongValueMin -->
+  <class name="LongValueMin" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator"/>
+    <constructor name="LongValueMin"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[the default constructor]]>
+      </doc>
+    </constructor>
+    <method name="addNextValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[add a value to the aggregator
+ 
+ @param val
+          an object whose string representation represents a long value.]]>
+      </doc>
+    </method>
+    <method name="addNextValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="newVal" type="long"/>
+      <doc>
+      <![CDATA[add a value to the aggregator
+ 
+ @param newVal
+          a long value.]]>
+      </doc>
+    </method>
+    <method name="getVal" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the aggregated value]]>
+      </doc>
+    </method>
+    <method name="getReport" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the string representation of the aggregated value]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[reset the aggregator]]>
+      </doc>
+    </method>
+    <method name="getCombinerOutput" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return return an array of one element. The element is a string
+         representation of the aggregated value. The return value is
+         expected to be used by the a combiner.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements a value aggregator that maintain the minimum of 
+ a sequence of long values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.LongValueMin -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.LongValueSum -->
+  <class name="LongValueSum" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator"/>
+    <constructor name="LongValueSum"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[the default constructor]]>
+      </doc>
+    </constructor>
+    <method name="addNextValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[add a value to the aggregator
+ 
+ @param val
+          an object whose string representation represents a long value.]]>
+      </doc>
+    </method>
+    <method name="addNextValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="long"/>
+      <doc>
+      <![CDATA[add a value to the aggregator
+ 
+ @param val
+          a long value.]]>
+      </doc>
+    </method>
+    <method name="getSum" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the aggregated value]]>
+      </doc>
+    </method>
+    <method name="getReport" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the string representation of the aggregated value]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[reset the aggregator]]>
+      </doc>
+    </method>
+    <method name="getCombinerOutput" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return return an array of one element. The element is a string
+         representation of the aggregated value. The return value is
+         expected to be used by the a combiner.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements a value aggregator that sums up 
+ a sequence of long values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.LongValueSum -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.StringValueMax -->
+  <class name="StringValueMax" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator"/>
+    <constructor name="StringValueMax"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[the default constructor]]>
+      </doc>
+    </constructor>
+    <method name="addNextValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[add a value to the aggregator
+ 
+ @param val
+          a string.]]>
+      </doc>
+    </method>
+    <method name="getVal" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the aggregated value]]>
+      </doc>
+    </method>
+    <method name="getReport" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the string representation of the aggregated value]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[reset the aggregator]]>
+      </doc>
+    </method>
+    <method name="getCombinerOutput" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return return an array of one element. The element is a string
+         representation of the aggregated value. The return value is
+         expected to be used by the a combiner.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements a value aggregator that maintain the biggest of 
+ a sequence of strings.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.StringValueMax -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.StringValueMin -->
+  <class name="StringValueMin" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator"/>
+    <constructor name="StringValueMin"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[the default constructor]]>
+      </doc>
+    </constructor>
+    <method name="addNextValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[add a value to the aggregator
+ 
+ @param val
+          a string.]]>
+      </doc>
+    </method>
+    <method name="getVal" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the aggregated value]]>
+      </doc>
+    </method>
+    <method name="getReport" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the string representation of the aggregated value]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[reset the aggregator]]>
+      </doc>
+    </method>
+    <method name="getCombinerOutput" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return return an array of one element. The element is a string
+         representation of the aggregated value. The return value is
+         expected to be used by the a combiner.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements a value aggregator that maintain the smallest of 
+ a sequence of strings.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.StringValueMin -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.UniqValueCount -->
+  <class name="UniqValueCount" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator"/>
+    <constructor name="UniqValueCount"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[the default constructor]]>
+      </doc>
+    </constructor>
+    <constructor name="UniqValueCount" type="long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[constructor
+ @param maxNum the limit in the number of unique values to keep.]]>
+      </doc>
+    </constructor>
+    <method name="setMaxItems" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="n" type="long"/>
+      <doc>
+      <![CDATA[Set the limit on the number of unique values
+ @param n the desired limit on the number of unique values
+ @return the new limit on the number of unique values]]>
+      </doc>
+    </method>
+    <method name="addNextValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[add a value to the aggregator
+ 
+ @param val
+          an object.]]>
+      </doc>
+    </method>
+    <method name="getReport" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return return the number of unique objects aggregated]]>
+      </doc>
+    </method>
+    <method name="getUniqueItems" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the set of the unique objects]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[reset the aggregator]]>
+      </doc>
+    </method>
+    <method name="getCombinerOutput" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return return an array of the unique objects. The return value is
+         expected to be used by the a combiner.]]>
+      </doc>
+    </method>
+    <field name="MAX_NUM_UNIQUE_VALUES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class implements a value aggregator that dedupes a sequence of objects.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.UniqValueCount -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+  <class name="UserDefinedValueAggregatorDescriptor" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorDescriptor"/>
+    <constructor name="UserDefinedValueAggregatorDescriptor" type="java.lang.String, org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@param className the class name of the user defined descriptor class
+ @param conf a configure object used for decriptor configuration]]>
+      </doc>
+    </constructor>
+    <method name="createInstance" return="java.lang.Object"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="className" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create an instance of the given class
+ @param className the name of the class
+ @return a dynamically created instance of the given class]]>
+      </doc>
+    </method>
+    <method name="generateKeyValPairs" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Object"/>
+      <param name="val" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Generate a list of aggregation-id/value pairs for the given 
+   key/value pairs by delegating the invocation to the real object.
+   
+ @param key
+          input key
+ @param val
+          input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+         aggregation type which is used to guide the way to aggregate the
+         value in the reduce/combiner phrase of an Aggregate based job.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the string representation of this object.]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Do nothing.]]>
+      </doc>
+    </method>
+    <field name="theAggregatorDescriptor" type="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorDescriptor"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class implements a wrapper for a user defined value 
+ aggregator descriptor.
+ It serves two functions: One is to create an object of 
+ ValueAggregatorDescriptor from the name of a user defined class
+ that may be dynamically loaded. The other is to
+ delegate invocations of generateKeyValPairs function to the created object.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.UserDefinedValueAggregatorDescriptor -->
+  <!-- start interface org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator -->
+  <interface name="ValueAggregator"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="addNextValue"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[add a value to the aggregator
+ 
+ @param val the value to be added]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[reset the aggregator]]>
+      </doc>
+    </method>
+    <method name="getReport" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the string representation of the agregator]]>
+      </doc>
+    </method>
+    <method name="getCombinerOutput" return="java.util.ArrayList"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return an array of values as the outputs of the combiner.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This interface defines the minimal protocol for value aggregators.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorBaseDescriptor -->
+  <class name="ValueAggregatorBaseDescriptor" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorDescriptor"/>
+    <constructor name="ValueAggregatorBaseDescriptor"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="generateEntry" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="java.lang.String"/>
+      <param name="id" type="java.lang.String"/>
+      <param name="val" type="org.apache.hadoop.io.Text"/>
+      <doc>
+      <![CDATA[@param type the aggregation type
+ @param id the aggregation id
+ @param val the val associated with the id to be aggregated
+ @return an Entry whose key is the aggregation id prefixed with 
+ the aggregation type.]]>
+      </doc>
+    </method>
+    <method name="generateValueAggregator" return="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="java.lang.String"/>
+      <param name="uniqCount" type="long"/>
+      <doc>
+      <![CDATA[@param type the aggregation type
+ @param uniqCount the limit in the number of unique values to keep, 
+                  if type is UNIQ_VALUE_COUNT 
+ @return a value aggregator of the given type.]]>
+      </doc>
+    </method>
+    <method name="generateKeyValPairs" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Object"/>
+      <param name="val" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Generate 1 or 2 aggregation-id/value pairs for the given key/value pair.
+ The first id will be of type LONG_VALUE_SUM, with "record_count" as
+ its aggregation id. If the input is a file split,
+ the second id of the same type will be generated too, with the file name 
+ as its aggregation id. This achieves the behavior of counting the total 
+ number of records in the input data, and the number of records 
+ in each input file.
+ 
+ @param key
+          input key
+ @param val
+          input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+         aggregation type which is used to guide the way to aggregate the
+         value in the reduce/combiner phrase of an Aggregate based job.]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[get the input file name.
+ 
+ @param conf a configuration object]]>
+      </doc>
+    </method>
+    <field name="UNIQ_VALUE_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LONG_VALUE_SUM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DOUBLE_VALUE_SUM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="VALUE_HISTOGRAM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LONG_VALUE_MAX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LONG_VALUE_MIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="STRING_VALUE_MAX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="STRING_VALUE_MIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="inputFile" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class implements the common functionalities of 
+ the subclasses of ValueAggregatorDescriptor class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorBaseDescriptor -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorCombiner -->
+  <class name="ValueAggregatorCombiner" extends="org.apache.hadoop.mapreduce.Reducer"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ValueAggregatorCombiner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.io.Text"/>
+      <param name="values" type="java.lang.Iterable"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Combines values for a given key.  
+ @param key the key is expected to be a Text object, whose prefix indicates
+ the type of aggregation to aggregate the values. 
+ @param values the values to combine
+ @param context to collect combined values]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements the generic combiner of Aggregate.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorCombiner -->
+  <!-- start interface org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorDescriptor -->
+  <interface name="ValueAggregatorDescriptor"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="generateKeyValPairs" return="java.util.ArrayList"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Object"/>
+      <param name="val" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Generate a list of aggregation-id/value pairs for 
+ the given key/value pair.
+ This function is usually called by the mapper of an Aggregate based job.
+ 
+ @param key
+          input key
+ @param val
+          input value
+ @return a list of aggregation id/value pairs. An aggregation id encodes an
+         aggregation type which is used to guide the way to aggregate the
+         value in the reduce/combiner phrase of an Aggregate based job.]]>
+      </doc>
+    </method>
+    <method name="configure"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Configure the object
+ 
+ @param conf
+          a Configuration object that may contain the information 
+          that can be used to configure the object.]]>
+      </doc>
+    </method>
+    <field name="TYPE_SEPARATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ONE" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This interface defines the contract a value aggregator descriptor must
+ support. Such a descriptor can be configured with a {@link Configuration}
+ object. Its main function is to generate a list of aggregation-id/value 
+ pairs. An aggregation id encodes an aggregation type which is used to 
+ guide the way to aggregate the value in the reduce/combiner phrase of an
+ Aggregate based job. 
+ The mapper in an Aggregate based map/reduce job may create one or more of
+ ValueAggregatorDescriptor objects at configuration time. For each input
+ key/value pair, the mapper will use those objects to create aggregation
+ id/value pairs.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorDescriptor -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorJob -->
+  <class name="ValueAggregatorJob" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ValueAggregatorJob"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <param name="descriptors" type="java.lang.Class[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createValueAggregatorJob" return="org.apache.hadoop.mapreduce.Job"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create an Aggregate based map/reduce job.
+ 
+ @param conf The configuration for job
+ @param args the arguments used for job creation. Generic hadoop
+ arguments are accepted.
+ @return a Job object ready for submission.
+ 
+ @throws IOException
+ @see GenericOptionsParser]]>
+      </doc>
+    </method>
+    <method name="createValueAggregatorJob" return="org.apache.hadoop.mapreduce.Job"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <param name="descriptors" type="java.lang.Class[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setAggregatorDescriptors" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="descriptors" type="java.lang.Class[]"/>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <doc>
+      <![CDATA[create and run an Aggregate based map/reduce job.
+ 
+ @param args the arguments used for job creation
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the main class for creating a map/reduce job using Aggregate
+ framework. The Aggregate is a specialization of map/reduce framework,
+ specializing for performing various simple aggregations.
+ 
+ Generally speaking, in order to implement an application using Map/Reduce
+ model, the developer is to implement Map and Reduce functions (and possibly
+ combine function). However, a lot of applications related to counting and
+ statistics computing have very similar characteristics. Aggregate abstracts
+ out the general patterns of these functions and implementing those patterns.
+ In particular, the package provides generic mapper/redducer/combiner 
+ classes, and a set of built-in value aggregators, and a generic utility 
+ class that helps user create map/reduce jobs using the generic class. 
+ The built-in aggregators include:
+ 
+ sum over numeric values count the number of distinct values compute the
+ histogram of values compute the minimum, maximum, media,average, standard
+ deviation of numeric values
+ 
+ The developer using Aggregate will need only to provide a plugin class
+ conforming to the following interface:
+ 
+ public interface ValueAggregatorDescriptor { public ArrayList&lt;Entry&gt;
+ generateKeyValPairs(Object key, Object value); public void
+ configure(Configuration conf); }
+ 
+ The package also provides a base class, ValueAggregatorBaseDescriptor,
+ implementing the above interface. The user can extend the base class and
+ implement generateKeyValPairs accordingly.
+ 
+ The primary work of generateKeyValPairs is to emit one or more key/value
+ pairs based on the input key/value pair. The key in an output key/value pair
+ encode two pieces of information: aggregation type and aggregation id. The
+ value will be aggregated onto the aggregation id according the aggregation
+ type.
+ 
+ This class offers a function to generate a map/reduce job using Aggregate
+ framework. The function takes the following parameters: input directory spec
+ input format (text or sequence file) output directory a file specifying the
+ user plugin class]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorJob -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorJobBase -->
+  <class name="ValueAggregatorJobBase" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ValueAggregatorJobBase"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setup"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getValueAggregatorDescriptor" return="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorDescriptor"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="spec" type="java.lang.String"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getAggregatorDescriptors" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="logSpec"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <field name="DESCRIPTOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DESCRIPTOR_NUM" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="USER_JAR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="aggregatorDescriptorList" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This abstract class implements some common functionalities of the
+ the generic mapper, reducer and combiner classes of Aggregate.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorJobBase -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorMapper -->
+  <class name="ValueAggregatorMapper" extends="org.apache.hadoop.mapreduce.Mapper"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ValueAggregatorMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K1"/>
+      <param name="value" type="V1"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[the map function. It iterates through the value aggregator descriptor 
+  list to generate aggregation id/value pairs and emit them.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements the generic mapper of Aggregate.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorMapper -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorReducer -->
+  <class name="ValueAggregatorReducer" extends="org.apache.hadoop.mapreduce.Reducer"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ValueAggregatorReducer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.io.Text"/>
+      <param name="values" type="java.lang.Iterable"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[@param key
+        the key is expected to be a Text object, whose prefix indicates
+        the type of aggregation to aggregate the values. In effect, data
+        driven computing is achieved. It is assumed that each aggregator's
+        getReport method emits appropriate output for the aggregator. This
+        may be further customized.
+ @param values the values to be aggregated
+ @param context]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements the generic reducer of Aggregate.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregatorReducer -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.aggregate.ValueHistogram -->
+  <class name="ValueHistogram" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.aggregate.ValueAggregator"/>
+    <constructor name="ValueHistogram"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="addNextValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[add the given val to the aggregator.
+ 
+ @param val the value to be added. It is expected to be a string
+ in the form of xxxx\tnum, meaning xxxx has num occurrences.]]>
+      </doc>
+    </method>
+    <method name="getReport" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the string representation of this aggregator.
+ It includes the following basic statistics of the histogram:
+    the number of unique values
+    the minimum value
+    the media value
+    the maximum value
+    the average value
+    the standard deviation]]>
+      </doc>
+    </method>
+    <method name="getReportDetails" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return a string representation of the list of value/frequence pairs of 
+ the histogram]]>
+      </doc>
+    </method>
+    <method name="getCombinerOutput" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return a list value/frequence pairs.
+  The return value is expected to be used by the reducer.]]>
+      </doc>
+    </method>
+    <method name="getReportItems" return="java.util.TreeMap"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return a TreeMap representation of the histogram]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[reset the aggregator]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class implements a value aggregator that computes the 
+ histogram of a sequence of strings.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.aggregate.ValueHistogram -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.chain">
+  <!-- start class org.apache.hadoop.mapreduce.lib.chain.ChainMapper -->
+  <class name="ChainMapper" extends="org.apache.hadoop.mapreduce.Mapper"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ChainMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="addMapper"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="klass" type="java.lang.Class"/>
+      <param name="inputKeyClass" type="java.lang.Class"/>
+      <param name="inputValueClass" type="java.lang.Class"/>
+      <param name="outputKeyClass" type="java.lang.Class"/>
+      <param name="outputValueClass" type="java.lang.Class"/>
+      <param name="mapperConf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Adds a {@link Mapper} class to the chain mapper.
+ 
+ <p>
+ The key and values are passed from one element of the chain to the next, by
+ value. For the added Mapper the configuration given for it,
+ <code>mapperConf</code>, have precedence over the job's Configuration. This
+ precedence is in effect when the task is running.
+ </p>
+ <p>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainMapper, this is done by the addMapper for the last mapper in the chain
+ </p>
+ 
+ @param job
+          The job.
+ @param klass
+          the Mapper class to add.
+ @param inputKeyClass
+          mapper input key class.
+ @param inputValueClass
+          mapper input value class.
+ @param outputKeyClass
+          mapper output key class.
+ @param outputValueClass
+          mapper output value class.
+ @param mapperConf
+          a configuration for the Mapper class. It is recommended to use a
+          Configuration without default values using the
+          <code>Configuration(boolean loadDefaults)</code> constructor with
+          FALSE.]]>
+      </doc>
+    </method>
+    <method name="setup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+    </method>
+    <method name="run"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <doc>
+    <![CDATA[The ChainMapper class allows to use multiple Mapper classes within a single
+ Map task.
+ 
+ <p>
+ The Mapper classes are invoked in a chained (or piped) fashion, the output of
+ the first becomes the input of the second, and so on until the last Mapper,
+ the output of the last Mapper will be written to the task's output.
+ </p>
+ <p>
+ The key functionality of this feature is that the Mappers in the chain do not
+ need to be aware that they are executed in a chain. This enables having
+ reusable specialized Mappers that can be combined to perform composite
+ operations within a single task.
+ </p>
+ <p>
+ Special care has to be taken when creating chains that the key/values output
+ by a Mapper are valid for the following Mapper in the chain. It is assumed
+ all Mappers and the Reduce in the chain use matching output and input key and
+ value classes as no conversion is done by the chaining code.
+ </p>
+ <p>
+ Using the ChainMapper and the ChainReducer classes it is possible to compose
+ Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
+ immediate benefit of this pattern is a dramatic reduction in disk IO.
+ </p>
+ <p>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainMapper, this is done by the addMapper for the last mapper in the chain.
+ </p>
+ ChainMapper usage pattern:
+ <p>
+ 
+ <pre>
+ ...
+ Job = new Job(conf);
+
+ Configuration mapAConf = new Configuration(false);
+ ...
+ ChainMapper.addMapper(job, AMap.class, LongWritable.class, Text.class,
+   Text.class, Text.class, true, mapAConf);
+
+ Configuration mapBConf = new Configuration(false);
+ ...
+ ChainMapper.addMapper(job, BMap.class, Text.class, Text.class,
+   LongWritable.class, Text.class, false, mapBConf);
+
+ ...
+
+ job.waitForCompletion(true);
+ ...
+ </pre>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.chain.ChainMapper -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.chain.ChainReducer -->
+  <class name="ChainReducer" extends="org.apache.hadoop.mapreduce.Reducer"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ChainReducer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setReducer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="klass" type="java.lang.Class"/>
+      <param name="inputKeyClass" type="java.lang.Class"/>
+      <param name="inputValueClass" type="java.lang.Class"/>
+      <param name="outputKeyClass" type="java.lang.Class"/>
+      <param name="outputValueClass" type="java.lang.Class"/>
+      <param name="reducerConf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Sets the {@link Reducer} class to the chain job.
+ 
+ <p>
+ The key and values are passed from one element of the chain to the next, by
+ value. For the added Reducer the configuration given for it,
+ <code>reducerConf</code>, has precedence over the job's Configuration.
+ This precedence is in effect when the task is running.
+ </p>
+ <p>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainReducer, this is done by the setReducer or the addMapper for the last
+ element in the chain.
+ </p>
+ 
+ @param job
+          the job
+ @param klass
+          the Reducer class to add.
+ @param inputKeyClass
+          reducer input key class.
+ @param inputValueClass
+          reducer input value class.
+ @param outputKeyClass
+          reducer output key class.
+ @param outputValueClass
+          reducer output value class.
+ @param reducerConf
+          a configuration for the Reducer class. It is recommended to use a
+          Configuration without default values using the
+          <code>Configuration(boolean loadDefaults)</code> constructor with
+          FALSE.]]>
+      </doc>
+    </method>
+    <method name="addMapper"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="klass" type="java.lang.Class"/>
+      <param name="inputKeyClass" type="java.lang.Class"/>
+      <param name="inputValueClass" type="java.lang.Class"/>
+      <param name="outputKeyClass" type="java.lang.Class"/>
+      <param name="outputValueClass" type="java.lang.Class"/>
+      <param name="mapperConf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Adds a {@link Mapper} class to the chain reducer.
+ 
+ <p>
+ The key and values are passed from one element of the chain to the next, by
+ value. For the added Mapper the configuration given for it,
+ <code>mapperConf</code>, has precedence over the job's Configuration. This
+ precedence is in effect when the task is running.
+ </p>
+ <p>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainMapper, this is done by the addMapper for the last mapper in the
+ chain.
+ </p>
+ 
+ @param job
+          The job.
+ @param klass
+          the Mapper class to add.
+ @param inputKeyClass
+          mapper input key class.
+ @param inputValueClass
+          mapper input value class.
+ @param outputKeyClass
+          mapper output key class.
+ @param outputValueClass
+          mapper output value class.
+ @param mapperConf
+          a configuration for the Mapper class. It is recommended to use a
+          Configuration without default values using the
+          <code>Configuration(boolean loadDefaults)</code> constructor with
+          FALSE.]]>
+      </doc>
+    </method>
+    <method name="setup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+    </method>
+    <method name="run"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <doc>
+    <![CDATA[The ChainReducer class allows to chain multiple Mapper classes after a
+ Reducer within the Reducer task.
+ 
+ <p>
+ For each record output by the Reducer, the Mapper classes are invoked in a
+ chained (or piped) fashion. The output of the reducer becomes the input of
+ the first mapper and output of first becomes the input of the second, and so
+ on until the last Mapper, the output of the last Mapper will be written to
+ the task's output.
+ </p>
+ <p>
+ The key functionality of this feature is that the Mappers in the chain do not
+ need to be aware that they are executed after the Reducer or in a chain. This
+ enables having reusable specialized Mappers that can be combined to perform
+ composite operations within a single task.
+ </p>
+ <p>
+ Special care has to be taken when creating chains that the key/values output
+ by a Mapper are valid for the following Mapper in the chain. It is assumed
+ all Mappers and the Reduce in the chain use matching output and input key and
+ value classes as no conversion is done by the chaining code.
+ </p>
+ <p> Using the ChainMapper and the ChainReducer classes it is possible to
+ compose Map/Reduce jobs that look like <code>[MAP+ / REDUCE MAP*]</code>. An
+ immediate benefit of this pattern is a dramatic reduction in disk IO. </p>
+ <p>
+ IMPORTANT: There is no need to specify the output key/value classes for the
+ ChainReducer, this is done by the setReducer or the addMapper for the last
+ element in the chain.
+ </p>
+ ChainReducer usage pattern:
+ <p>
+ 
+ <pre>
+ ...
+ Job = new Job(conf);
+ ....
+
+ Configuration reduceConf = new Configuration(false);
+ ...
+ ChainReducer.setReducer(job, XReduce.class, LongWritable.class, Text.class,
+   Text.class, Text.class, true, reduceConf);
+
+ ChainReducer.addMapper(job, CMap.class, Text.class, Text.class,
+   LongWritable.class, Text.class, false, null);
+
+ ChainReducer.addMapper(job, DMap.class, LongWritable.class, Text.class,
+   LongWritable.class, LongWritable.class, true, null);
+
+ ...
+
+ job.waitForCompletion(true);
+ ...
+ </pre>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.chain.ChainReducer -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.db">
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.BigDecimalSplitter -->
+  <class name="BigDecimalSplitter" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.db.DBSplitter"/>
+    <constructor name="BigDecimalSplitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="split" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="results" type="java.sql.ResultSet"/>
+      <param name="colName" type="java.lang.String"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+    </method>
+    <method name="tryDivide" return="java.math.BigDecimal"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="numerator" type="java.math.BigDecimal"/>
+      <param name="denominator" type="java.math.BigDecimal"/>
+      <doc>
+      <![CDATA[Divide numerator by denominator. If impossible in exact mode, use rounding.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Implement DBSplitter over BigDecimal values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.BigDecimalSplitter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.BooleanSplitter -->
+  <class name="BooleanSplitter" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.db.DBSplitter"/>
+    <constructor name="BooleanSplitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="split" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="results" type="java.sql.ResultSet"/>
+      <param name="colName" type="java.lang.String"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+    </method>
+    <doc>
+    <![CDATA[Implement DBSplitter over boolean values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.BooleanSplitter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.DataDrivenDBInputFormat -->
+  <class name="DataDrivenDBInputFormat" extends="org.apache.hadoop.mapreduce.lib.db.DBInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="DataDrivenDBInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getSplitter" return="org.apache.hadoop.mapreduce.lib.db.DBSplitter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="sqlDataType" type="int"/>
+      <doc>
+      <![CDATA[@return the DBSplitter implementation to use to divide the table/query into InputSplits.]]>
+      </doc>
+    </method>
+    <method name="getSplits" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getBoundingValsQuery" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return a query which returns the minimum and maximum values for
+ the order-by column.
+
+ The min value should be in the first column, and the
+ max value should be in the second column of the results.]]>
+      </doc>
+    </method>
+    <method name="setBoundingQuery"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="query" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the user-defined bounding query to use with a user-defined query.
+      This *must* include the substring "$CONDITIONS"
+      (DataDrivenDBInputFormat.SUBSTITUTE_TOKEN) inside the WHERE clause,
+      so that DataDrivenDBInputFormat knows where to insert split clauses.
+      e.g., "SELECT foo FROM mytable WHERE $CONDITIONS"
+      This will be expanded to something like:
+        SELECT foo FROM mytable WHERE (id &gt; 100) AND (id &lt; 250)
+      inside each split.]]>
+      </doc>
+    </method>
+    <method name="createDBRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setInput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="inputClass" type="java.lang.Class"/>
+      <param name="tableName" type="java.lang.String"/>
+      <param name="conditions" type="java.lang.String"/>
+      <param name="splitBy" type="java.lang.String"/>
+      <param name="fieldNames" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Note that the "orderBy" column is called the "splitBy" in this version.
+ We reuse the same field, but it's not strictly ordering it -- just partitioning
+ the results.]]>
+      </doc>
+    </method>
+    <method name="setInput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="inputClass" type="java.lang.Class"/>
+      <param name="inputQuery" type="java.lang.String"/>
+      <param name="inputBoundingQuery" type="java.lang.String"/>
+      <doc>
+      <![CDATA[setInput() takes a custom query and a separate "bounding query" to use
+      instead of the custom "count query" used by DBInputFormat.]]>
+      </doc>
+    </method>
+    <field name="SUBSTITUTE_TOKEN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If users are providing their own query, the following string is expected to
+      appear in the WHERE clause, which will be substituted with a pair of conditions
+      on the input to allow input splits to parallelise the import.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A InputFormat that reads input data from an SQL table.
+ Operates like DBInputFormat, but instead of using LIMIT and OFFSET to demarcate
+ splits, it tries to generate WHERE clauses which separate the data into roughly
+ equivalent shards.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.DataDrivenDBInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.DataDrivenDBRecordReader -->
+  <class name="DataDrivenDBRecordReader" extends="org.apache.hadoop.mapreduce.lib.db.DBRecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="DataDrivenDBRecordReader" type="org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit, java.lang.Class, org.apache.hadoop.conf.Configuration, java.sql.Connection, org.apache.hadoop.mapreduce.lib.db.DBConfiguration, java.lang.String, java.lang.String[], java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="SQLException" type="java.sql.SQLException"/>
+      <doc>
+      <![CDATA[@param split The InputSplit to read data for
+ @throws SQLException]]>
+      </doc>
+    </constructor>
+    <method name="getSelectQuery" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the query for selecting the records,
+ subclasses can override this for custom behaviour.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A RecordReader that reads records from a SQL table,
+ using data-driven WHERE clause splits.
+ Emits LongWritables containing the record number as
+ key and DBWritables as value.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.DataDrivenDBRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.DateSplitter -->
+  <class name="DateSplitter" extends="org.apache.hadoop.mapreduce.lib.db.IntegerSplitter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="DateSplitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="split" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="results" type="java.sql.ResultSet"/>
+      <param name="colName" type="java.lang.String"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+    </method>
+    <method name="dateToString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="d" type="java.util.Date"/>
+      <doc>
+      <![CDATA[Given a Date 'd', format it as a string for use in a SQL date
+ comparison operation.
+ @param d the date to format.
+ @return the string representing this date in SQL with any appropriate
+ quotation characters, etc.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Implement DBSplitter over date/time values.
+ Make use of logic from IntegerSplitter, since date/time are just longs
+ in Java.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.DateSplitter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.DBConfiguration -->
+  <class name="DBConfiguration" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="DBConfiguration" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="configureDB"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="driverClass" type="java.lang.String"/>
+      <param name="dbUrl" type="java.lang.String"/>
+      <param name="userName" type="java.lang.String"/>
+      <param name="passwd" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets the DB access related fields in the {@link Configuration}.  
+ @param conf the configuration
+ @param driverClass JDBC Driver class name
+ @param dbUrl JDBC DB access URL. 
+ @param userName DB access username 
+ @param passwd DB access passwd]]>
+      </doc>
+    </method>
+    <method name="configureDB"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="driverClass" type="java.lang.String"/>
+      <param name="dbUrl" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Sets the DB access related fields in the JobConf.  
+ @param job the job
+ @param driverClass JDBC Driver class name
+ @param dbUrl JDBC DB access URL.]]>
+      </doc>
+    </method>
+    <method name="getConnection" return="java.sql.Connection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+      <doc>
+      <![CDATA[Returns a connection object to the DB 
+ @throws ClassNotFoundException 
+ @throws SQLException]]>
+      </doc>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getInputTableName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setInputTableName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tableName" type="java.lang.String"/>
+    </method>
+    <method name="getInputFieldNames" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setInputFieldNames"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fieldNames" type="java.lang.String[]"/>
+    </method>
+    <method name="getInputConditions" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setInputConditions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conditions" type="java.lang.String"/>
+    </method>
+    <method name="getInputOrderBy" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setInputOrderBy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="orderby" type="java.lang.String"/>
+    </method>
+    <method name="getInputQuery" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setInputQuery"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="query" type="java.lang.String"/>
+    </method>
+    <method name="getInputCountQuery" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setInputCountQuery"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="query" type="java.lang.String"/>
+    </method>
+    <method name="setInputBoundingQuery"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="query" type="java.lang.String"/>
+    </method>
+    <method name="getInputBoundingQuery" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getInputClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setInputClass"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="inputClass" type="java.lang.Class"/>
+    </method>
+    <method name="getOutputTableName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setOutputTableName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tableName" type="java.lang.String"/>
+    </method>
+    <method name="getOutputFieldNames" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setOutputFieldNames"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fieldNames" type="java.lang.String[]"/>
+    </method>
+    <method name="setOutputFieldCount"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fieldCount" type="int"/>
+    </method>
+    <method name="getOutputFieldCount" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="DRIVER_CLASS_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The JDBC Driver class name]]>
+      </doc>
+    </field>
+    <field name="URL_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[JDBC Database access URL]]>
+      </doc>
+    </field>
+    <field name="USERNAME_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[User name to access the database]]>
+      </doc>
+    </field>
+    <field name="PASSWORD_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Password to access the database]]>
+      </doc>
+    </field>
+    <field name="INPUT_TABLE_NAME_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Input table name]]>
+      </doc>
+    </field>
+    <field name="INPUT_FIELD_NAMES_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Field names in the Input table]]>
+      </doc>
+    </field>
+    <field name="INPUT_CONDITIONS_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[WHERE clause in the input SELECT statement]]>
+      </doc>
+    </field>
+    <field name="INPUT_ORDER_BY_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[ORDER BY clause in the input SELECT statement]]>
+      </doc>
+    </field>
+    <field name="INPUT_QUERY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whole input query, excluding LIMIT...OFFSET]]>
+      </doc>
+    </field>
+    <field name="INPUT_COUNT_QUERY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Input query to get the count of records]]>
+      </doc>
+    </field>
+    <field name="INPUT_BOUNDING_QUERY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Input query to get the max and min values of the jdbc.input.query]]>
+      </doc>
+    </field>
+    <field name="INPUT_CLASS_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Class name implementing DBWritable which will hold input tuples]]>
+      </doc>
+    </field>
+    <field name="OUTPUT_TABLE_NAME_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Output table name]]>
+      </doc>
+    </field>
+    <field name="OUTPUT_FIELD_NAMES_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Field names in the Output table]]>
+      </doc>
+    </field>
+    <field name="OUTPUT_FIELD_COUNT_PROPERTY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of fields in the Output table]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A container for configuration property names for jobs with DB input/output.
+  
+ The job can be configured using the static methods in this class, 
+ {@link DBInputFormat}, and {@link DBOutputFormat}. 
+ Alternatively, the properties can be set in the configuration with proper
+ values. 
+   
+ @see DBConfiguration#configureDB(Configuration, String, String, String, String)
+ @see DBInputFormat#setInput(Job, Class, String, String)
+ @see DBInputFormat#setInput(Job, Class, String, String, String, String...)
+ @see DBOutputFormat#setOutput(Job, String, String...)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.DBConfiguration -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.DBInputFormat -->
+  <class name="DBInputFormat" extends="org.apache.hadoop.mapreduce.InputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="DBInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDBConf" return="org.apache.hadoop.mapreduce.lib.db.DBConfiguration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getConnection" return="java.sql.Connection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createConnection" return="java.sql.Connection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDBProductName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="createDBRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getSplits" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getCountQuery" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the query for getting the total number of rows, 
+ subclasses can override this for custom behaviour.]]>
+      </doc>
+    </method>
+    <method name="setInput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="inputClass" type="java.lang.Class"/>
+      <param name="tableName" type="java.lang.String"/>
+      <param name="conditions" type="java.lang.String"/>
+      <param name="orderBy" type="java.lang.String"/>
+      <param name="fieldNames" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Initializes the map-part of the job with the appropriate input settings.
+ 
+ @param job The map-reduce job
+ @param inputClass the class object implementing DBWritable, which is the 
+ Java object holding tuple fields.
+ @param tableName The table to read data from
+ @param conditions The condition which to select data with, 
+ eg. '(updated &gt; 20070101 AND length &gt; 0)'
+ @param orderBy the fieldNames in the orderBy clause.
+ @param fieldNames The field names in the table
+ @see #setInput(Job, Class, String, String)]]>
+      </doc>
+    </method>
+    <method name="setInput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="inputClass" type="java.lang.Class"/>
+      <param name="inputQuery" type="java.lang.String"/>
+      <param name="inputCountQuery" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Initializes the map-part of the job with the appropriate input settings.
+ 
+ @param job The map-reduce job
+ @param inputClass the class object implementing DBWritable, which is the 
+ Java object holding tuple fields.
+ @param inputQuery the input query to select fields. Example : 
+ "SELECT f1, f2, f3 FROM Mytable ORDER BY f1"
+ @param inputCountQuery the input query that returns 
+ the number of records in the table. 
+ Example : "SELECT COUNT(f1) FROM Mytable"
+ @see #setInput(Job, Class, String, String, String, String...)]]>
+      </doc>
+    </method>
+    <method name="closeConnection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <field name="dbProductName" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="conditions" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="connection" type="java.sql.Connection"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="tableName" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="fieldNames" type="java.lang.String[]"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="dbConf" type="org.apache.hadoop.mapreduce.lib.db.DBConfiguration"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An InputFormat that reads input data from an SQL table.
+ <p>
+ DBInputFormat emits LongWritables containing the record number as 
+ key and DBWritables as value. 
+ 
+ The SQL query and input class can be set using one of the two 
+ setInput methods.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.DBInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.DBOutputFormat -->
+  <class name="DBOutputFormat" extends="org.apache.hadoop.mapreduce.OutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="DBOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getOutputCommitter" return="org.apache.hadoop.mapreduce.OutputCommitter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="constructQuery" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="table" type="java.lang.String"/>
+      <param name="fieldNames" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Constructs the query used as the prepared statement to insert data.
+ 
+ @param table
+          the table to insert into
+ @param fieldNames
+          the fields to insert into. If field names are unknown, supply an
+          array of nulls.]]>
+      </doc>
+    </method>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="setOutput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="tableName" type="java.lang.String"/>
+      <param name="fieldNames" type="java.lang.String[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Initializes the reduce-part of the job with 
+ the appropriate output settings
+ 
+ @param job The job
+ @param tableName The table to insert data into
+ @param fieldNames The field names in the table.]]>
+      </doc>
+    </method>
+    <method name="setOutput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="tableName" type="java.lang.String"/>
+      <param name="fieldCount" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Initializes the reduce-part of the job 
+ with the appropriate output settings
+ 
+ @param job The job
+ @param tableName The table to insert data into
+ @param fieldCount the number of fields in the table.]]>
+      </doc>
+    </method>
+    <field name="dbProductName" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An OutputFormat that sends the reduce output to a SQL table.
+ <p> 
+ {@link DBOutputFormat} accepts &lt;key,value&gt; pairs, where 
+ key has a type extending DBWritable. Returned {@link RecordWriter} 
+ writes <b>only the key</b> to the database with a batch SQL query.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.DBOutputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.DBRecordReader -->
+  <class name="DBRecordReader" extends="org.apache.hadoop.mapreduce.RecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="DBRecordReader" type="org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit, java.lang.Class, org.apache.hadoop.conf.Configuration, java.sql.Connection, org.apache.hadoop.mapreduce.lib.db.DBConfiguration, java.lang.String, java.lang.String[], java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="SQLException" type="java.sql.SQLException"/>
+      <doc>
+      <![CDATA[@param split The InputSplit to read data for
+ @throws SQLException]]>
+      </doc>
+    </constructor>
+    <method name="executeQuery" return="java.sql.ResultSet"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="query" type="java.lang.String"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+    </method>
+    <method name="getSelectQuery" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the query for selecting the records, 
+ subclasses can override this for custom behaviour.]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getCurrentKey" return="org.apache.hadoop.io.LongWritable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getCurrentValue" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="createValue" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="deprecated, no comment">
+      <doc>
+      <![CDATA[@deprecated]]>
+      </doc>
+    </method>
+    <method name="getPos" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="deprecated, no comment">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated]]>
+      </doc>
+    </method>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #nextKeyValue()}">
+      <param name="key" type="org.apache.hadoop.io.LongWritable"/>
+      <param name="value" type="T"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[@deprecated Use {@link #nextKeyValue()}]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="nextKeyValue" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getSplit" return="org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFieldNames" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTableName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="getConditions" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDBConf" return="org.apache.hadoop.mapreduce.lib.db.DBConfiguration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="getConnection" return="java.sql.Connection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStatement" return="java.sql.PreparedStatement"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="setStatement"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="stmt" type="java.sql.PreparedStatement"/>
+    </method>
+    <field name="statement" type="java.sql.PreparedStatement"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A RecordReader that reads records from a SQL table.
+ Emits LongWritables containing the record number as 
+ key and DBWritables as value.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.DBRecordReader -->
+  <!-- start interface org.apache.hadoop.mapreduce.lib.db.DBSplitter -->
+  <interface name="DBSplitter"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="split" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="results" type="java.sql.ResultSet"/>
+      <param name="colName" type="java.lang.String"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+      <doc>
+      <![CDATA[Given a ResultSet containing one record (and already advanced to that record)
+ with two columns (a low value, and a high value, both of the same type), determine
+ a set of splits that span the given values.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[DBSplitter will generate DBInputSplits to use with DataDrivenDBInputFormat.
+ DataDrivenDBInputFormat needs to interpolate between two values that
+ represent the lowest and highest valued records to import. Depending
+ on the data-type of the column, this requires different behavior.
+ DBSplitter implementations should perform this for a data type or family
+ of data types.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.lib.db.DBSplitter -->
+  <!-- start interface org.apache.hadoop.mapreduce.lib.db.DBWritable -->
+  <interface name="DBWritable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="write"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="statement" type="java.sql.PreparedStatement"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+      <doc>
+      <![CDATA[Sets the fields of the object in the {@link PreparedStatement}.
+ @param statement the statement that the fields are put into.
+ @throws SQLException]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resultSet" type="java.sql.ResultSet"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+      <doc>
+      <![CDATA[Reads the fields of the object from the {@link ResultSet}. 
+ @param resultSet the {@link ResultSet} to get the fields from.
+ @throws SQLException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Objects that are read from/written to a database should implement
+ <code>DBWritable</code>. DBWritable is similar to {@link Writable} 
+ except that the {@link #write(PreparedStatement)} method takes a 
+ {@link PreparedStatement}, and {@link #readFields(ResultSet)} 
+ takes a {@link ResultSet}. 
+ <p>
+ Implementations are responsible for writing the fields of the object 
+ to PreparedStatement, and reading the fields of the object from the 
+ ResultSet. 
+ 
+ <p>Example:</p>
+ If we have the following table in the database :
+ <pre>
+ CREATE TABLE MyTable (
+   counter        INTEGER NOT NULL,
+   timestamp      BIGINT  NOT NULL,
+ );
+ </pre>
+ then we can read/write the tuples from/to the table with :
+ <p><pre>
+ public class MyWritable implements Writable, DBWritable {
+   // Some data     
+   private int counter;
+   private long timestamp;
+       
+   //Writable#write() implementation
+   public void write(DataOutput out) throws IOException {
+     out.writeInt(counter);
+     out.writeLong(timestamp);
+   }
+       
+   //Writable#readFields() implementation
+   public void readFields(DataInput in) throws IOException {
+     counter = in.readInt();
+     timestamp = in.readLong();
+   }
+       
+   public void write(PreparedStatement statement) throws SQLException {
+     statement.setInt(1, counter);
+     statement.setLong(2, timestamp);
+   }
+       
+   public void readFields(ResultSet resultSet) throws SQLException {
+     counter = resultSet.getInt(1);
+     timestamp = resultSet.getLong(2);
+   } 
+ }
+ </pre>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.lib.db.DBWritable -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.FloatSplitter -->
+  <class name="FloatSplitter" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.db.DBSplitter"/>
+    <constructor name="FloatSplitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="split" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="results" type="java.sql.ResultSet"/>
+      <param name="colName" type="java.lang.String"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+    </method>
+    <doc>
+    <![CDATA[Implement DBSplitter over floating-point values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.FloatSplitter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.IntegerSplitter -->
+  <class name="IntegerSplitter" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.db.DBSplitter"/>
+    <constructor name="IntegerSplitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="split" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="results" type="java.sql.ResultSet"/>
+      <param name="colName" type="java.lang.String"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+    </method>
+    <doc>
+    <![CDATA[Implement DBSplitter over integer values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.IntegerSplitter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.MySQLDataDrivenDBRecordReader -->
+  <class name="MySQLDataDrivenDBRecordReader" extends="org.apache.hadoop.mapreduce.lib.db.DataDrivenDBRecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MySQLDataDrivenDBRecordReader" type="org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit, java.lang.Class, org.apache.hadoop.conf.Configuration, java.sql.Connection, org.apache.hadoop.mapreduce.lib.db.DBConfiguration, java.lang.String, java.lang.String[], java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="SQLException" type="java.sql.SQLException"/>
+    </constructor>
+    <method name="executeQuery" return="java.sql.ResultSet"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="query" type="java.lang.String"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+    </method>
+    <doc>
+    <![CDATA[A RecordReader that reads records from a MySQL table via DataDrivenDBRecordReader]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.MySQLDataDrivenDBRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.MySQLDBRecordReader -->
+  <class name="MySQLDBRecordReader" extends="org.apache.hadoop.mapreduce.lib.db.DBRecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MySQLDBRecordReader" type="org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit, java.lang.Class, org.apache.hadoop.conf.Configuration, java.sql.Connection, org.apache.hadoop.mapreduce.lib.db.DBConfiguration, java.lang.String, java.lang.String[], java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="SQLException" type="java.sql.SQLException"/>
+    </constructor>
+    <method name="executeQuery" return="java.sql.ResultSet"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="query" type="java.lang.String"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+    </method>
+    <doc>
+    <![CDATA[A RecordReader that reads records from a MySQL table.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.MySQLDBRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.OracleDataDrivenDBInputFormat -->
+  <class name="OracleDataDrivenDBInputFormat" extends="org.apache.hadoop.mapreduce.lib.db.DataDrivenDBInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="OracleDataDrivenDBInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getSplitter" return="org.apache.hadoop.mapreduce.lib.db.DBSplitter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="sqlDataType" type="int"/>
+      <doc>
+      <![CDATA[@return the DBSplitter implementation to use to divide the table/query into InputSplits.]]>
+      </doc>
+    </method>
+    <method name="createDBRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A InputFormat that reads input data from an SQL table in an Oracle db.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.OracleDataDrivenDBInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.OracleDataDrivenDBRecordReader -->
+  <class name="OracleDataDrivenDBRecordReader" extends="org.apache.hadoop.mapreduce.lib.db.DataDrivenDBRecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="OracleDataDrivenDBRecordReader" type="org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit, java.lang.Class, org.apache.hadoop.conf.Configuration, java.sql.Connection, org.apache.hadoop.mapreduce.lib.db.DBConfiguration, java.lang.String, java.lang.String[], java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="SQLException" type="java.sql.SQLException"/>
+    </constructor>
+    <doc>
+    <![CDATA[A RecordReader that reads records from an Oracle table via DataDrivenDBRecordReader.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.OracleDataDrivenDBRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.OracleDateSplitter -->
+  <class name="OracleDateSplitter" extends="org.apache.hadoop.mapreduce.lib.db.DateSplitter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="OracleDateSplitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="dateToString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="d" type="java.util.Date"/>
+    </method>
+    <doc>
+    <![CDATA[Implement DBSplitter over date/time values returned by an Oracle db.
+ Make use of logic from DateSplitter, since this just needs to use
+ some Oracle-specific functions on the formatting end when generating
+ InputSplits.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.OracleDateSplitter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.OracleDBRecordReader -->
+  <class name="OracleDBRecordReader" extends="org.apache.hadoop.mapreduce.lib.db.DBRecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="OracleDBRecordReader" type="org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit, java.lang.Class, org.apache.hadoop.conf.Configuration, java.sql.Connection, org.apache.hadoop.mapreduce.lib.db.DBConfiguration, java.lang.String, java.lang.String[], java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="SQLException" type="java.sql.SQLException"/>
+    </constructor>
+    <method name="getSelectQuery" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the query for selecting the records from an Oracle DB.]]>
+      </doc>
+    </method>
+    <method name="setSessionTimeZone"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="conn" type="java.sql.Connection"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+      <doc>
+      <![CDATA[Set the session time zone.
+ @param conf The current configuration.
+ We read the 'oracle.sessionTimeZone' property from here.
+ @param conn The connection to alter the timezone properties of.]]>
+      </doc>
+    </method>
+    <field name="SESSION_TIMEZONE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Configuration key to set to a timezone string.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A RecordReader that reads records from an Oracle SQL table.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.OracleDBRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.db.TextSplitter -->
+  <class name="TextSplitter" extends="org.apache.hadoop.mapreduce.lib.db.BigDecimalSplitter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TextSplitter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="split" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="results" type="java.sql.ResultSet"/>
+      <param name="colName" type="java.lang.String"/>
+      <exception name="SQLException" type="java.sql.SQLException"/>
+      <doc>
+      <![CDATA[This method needs to determine the splits between two user-provided strings.
+ In the case where the user's strings are 'A' and 'Z', this is not hard; we 
+ could create two splits from ['A', 'M') and ['M', 'Z'], 26 splits for strings
+ beginning with each letter, etc.
+
+ If a user has provided us with the strings "Ham" and "Haze", however, we need
+ to create splits that differ in the third letter.
+
+ The algorithm used is as follows:
+ Since there are 2**16 unicode characters, we interpret characters as digits in
+ base 65536. Given a string 's' containing characters s_0, s_1 .. s_n, we interpret
+ the string as the number: 0.s_0 s_1 s_2.. s_n in base 65536. Having mapped the
+ low and high strings into floating-point values, we then use the BigDecimalSplitter
+ to establish the even split points, then map the resulting floating point values
+ back into strings.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Implement DBSplitter over text strings.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.db.TextSplitter -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.fieldsel">
+  <!-- start class org.apache.hadoop.mapreduce.lib.fieldsel.FieldSelectionHelper -->
+  <class name="FieldSelectionHelper" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FieldSelectionHelper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FieldSelectionHelper" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="parseOutputKeyValueSpec" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="keyValueSpec" type="java.lang.String"/>
+      <param name="keyFieldList" type="java.util.List"/>
+      <param name="valueFieldList" type="java.util.List"/>
+    </method>
+    <method name="specToString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fieldSeparator" type="java.lang.String"/>
+      <param name="keyValueSpec" type="java.lang.String"/>
+      <param name="allValueFieldsFrom" type="int"/>
+      <param name="keyFieldList" type="java.util.List"/>
+      <param name="valueFieldList" type="java.util.List"/>
+    </method>
+    <method name="getKey" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getValue" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="extractOutputKeyValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="val" type="java.lang.String"/>
+      <param name="fieldSep" type="java.lang.String"/>
+      <param name="keyFieldList" type="java.util.List"/>
+      <param name="valFieldList" type="java.util.List"/>
+      <param name="allValueFieldsFrom" type="int"/>
+      <param name="ignoreKey" type="boolean"/>
+      <param name="isMap" type="boolean"/>
+    </method>
+    <field name="emptyText" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DATA_FIELD_SEPARATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DATA_FIELD_SEPERATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Use {@link #DATA_FIELD_SEPARATOR}">
+      <doc>
+      <![CDATA[@deprecated Use {@link #DATA_FIELD_SEPARATOR}]]>
+      </doc>
+    </field>
+    <field name="MAP_OUTPUT_KEY_VALUE_SPEC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="REDUCE_OUTPUT_KEY_VALUE_SPEC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class implements a mapper/reducer class that can be used to perform
+ field selections in a manner similar to unix cut. The input data is treated
+ as fields separated by a user specified separator (the default value is
+ "\t"). The user can specify a list of fields that form the map output keys,
+ and a list of fields that form the map output values. If the inputformat is
+ TextInputFormat, the mapper will ignore the key to the map function, and the
+ fields are from the value only. Otherwise, the fields are the union of those
+ from the key and those from the value.
+ 
+ The field separator is under attribute "mapreduce.fieldsel.data.field.separator"
+ 
+ The map output field list spec is under attribute 
+ "mapreduce.fieldsel.map.output.key.value.fields.spec".
+ The value is expected to be like "keyFieldsSpec:valueFieldsSpec"
+ key/valueFieldsSpec are comma (,) separated field spec: fieldSpec,fieldSpec,fieldSpec ...
+ Each field spec can be a simple number (e.g. 5) specifying a specific field, or a range
+ (like 2-5) to specify a range of fields, or an open range (like 3-) specifying all 
+ the fields starting from field 3. The open range field spec applies to value fields only.
+ They have no effect on the key fields.
+ 
+ Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields 4,3,0 and 1 for keys,
+ and use fields 6,5,1,2,3,7 and above for values.
+ 
+ The reduce output field list spec is under attribute 
+ "mapreduce.fieldsel.reduce.output.key.value.fields.spec".
+ 
+ The reducer extracts output key/value pairs in a similar manner, except that
+ the key is never ignored.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.fieldsel.FieldSelectionHelper -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.fieldsel.FieldSelectionMapper -->
+  <class name="FieldSelectionMapper" extends="org.apache.hadoop.mapreduce.Mapper"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FieldSelectionMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="val" type="V"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[The identity function. The input key/value pair is written directly to output.]]>
+      </doc>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class implements a mapper class that can be used to perform
+ field selections in a manner similar to unix cut. The input data is treated
+ as fields separated by a user specified separator (the default value is
+ "\t"). The user can specify a list of fields that form the map output keys,
+ and a list of fields that form the map output values. If the inputformat is
+ TextInputFormat, the mapper will ignore the key to the map function, and the
+ fields are from the value only. Otherwise, the fields are the union of those
+ from the key and those from the value.
+ 
+ The field separator is under attribute "mapreduce.fieldsel.data.field.separator"
+ 
+ The map output field list spec is under attribute 
+ "mapreduce.fieldsel.map.output.key.value.fields.spec". 
+ The value is expected to be like
+ "keyFieldsSpec:valueFieldsSpec" key/valueFieldsSpec are comma (,) separated
+ field spec: fieldSpec,fieldSpec,fieldSpec ... Each field spec can be a 
+ simple number (e.g. 5) specifying a specific field, or a range (like 2-5)
+ to specify a range of fields, or an open range (like 3-) specifying all 
+ the fields starting from field 3. The open range field spec applies to
+ value fields only. They have no effect on the key fields.
+ 
+ Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields
+ 4,3,0 and 1 for keys, and use fields 6,5,1,2,3,7 and above for values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.fieldsel.FieldSelectionMapper -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.fieldsel.FieldSelectionReducer -->
+  <class name="FieldSelectionReducer" extends="org.apache.hadoop.mapreduce.Reducer"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FieldSelectionReducer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.io.Text"/>
+      <param name="values" type="java.lang.Iterable"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class implements a reducer class that can be used to perform field
+ selections in a manner similar to unix cut. 
+ 
+ The input data is treated as fields separated by a user specified
+ separator (the default value is "\t"). The user can specify a list of
+ fields that form the reduce output keys, and a list of fields that form
+ the reduce output values. The fields are the union of those from the key
+ and those from the value.
+ 
+ The field separator is under attribute "mapreduce.fieldsel.data.field.separator"
+ 
+ The reduce output field list spec is under attribute 
+ "mapreduce.fieldsel.reduce.output.key.value.fields.spec". 
+ The value is expected to be like
+ "keyFieldsSpec:valueFieldsSpec" key/valueFieldsSpec are comma (,) 
+ separated field spec: fieldSpec,fieldSpec,fieldSpec ... Each field spec
+ can be a simple number (e.g. 5) specifying a specific field, or a range
+ (like 2-5) to specify a range of fields, or an open range (like 3-) 
+ specifying all the fields starting from field 3. The open range field
+ spec applies to value fields only. They have no effect on the key fields.
+ 
+ Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields
+ 4,3,0 and 1 for keys, and use fields 6,5,1,2,3,7 and above for values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.fieldsel.FieldSelectionReducer -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.input">
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat -->
+  <class name="CombineFileInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.FileInputFormat"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CombineFileInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[default constructor]]>
+      </doc>
+    </constructor>
+    <method name="setMaxSplitSize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="maxSplitSize" type="long"/>
+      <doc>
+      <![CDATA[Specify the maximum size (in bytes) of each split. Each split is
+ approximately equal to the specified size.]]>
+      </doc>
+    </method>
+    <method name="setMinSplitSizeNode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="minSplitSizeNode" type="long"/>
+      <doc>
+      <![CDATA[Specify the minimum size (in bytes) of each split per node.
+ This applies to data that is left over after combining data on a single
+ node into splits that are of maximum size specified by maxSplitSize.
+ This leftover data will be combined into its own split if its size
+ exceeds minSplitSizeNode.]]>
+      </doc>
+    </method>
+    <method name="setMinSplitSizeRack"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="minSplitSizeRack" type="long"/>
+      <doc>
+      <![CDATA[Specify the minimum size (in bytes) of each split per rack.
+ This applies to data that is left over after combining data on a single
+ rack into splits that are of maximum size specified by maxSplitSize.
+ This leftover data will be combined into its own split if its size
+ exceeds minSplitSizeRack.]]>
+      </doc>
+    </method>
+    <method name="createPool"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="filters" type="java.util.List"/>
+      <doc>
+      <![CDATA[Create a new pool and add the filters to it.
+ A split cannot have files from different pools.]]>
+      </doc>
+    </method>
+    <method name="createPool"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="filters" type="org.apache.hadoop.fs.PathFilter[]"/>
+      <doc>
+      <![CDATA[Create a new pool and add the filters to it. 
+ A pathname can satisfy any one of the specified filters.
+ A split cannot have files from different pools.]]>
+      </doc>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="getSplits" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[This is not implemented yet.]]>
+      </doc>
+    </method>
+    <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="stat" type="org.apache.hadoop.fs.FileStatus"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="SPLIT_MINSIZE_PERNODE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SPLIT_MINSIZE_PERRACK" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An abstract {@link InputFormat} that returns {@link CombineFileSplit}'s in 
+ {@link InputFormat#getSplits(JobContext)} method. 
+ 
+ Splits are constructed from the files under the input paths. 
+ A split cannot have files from different pools.
+ Each split returned may contain blocks from different files.
+ If a maxSplitSize is specified, then blocks on the same node are
+ combined to form a single split. Blocks that are left over are
+ then combined with other blocks in the same rack. 
+ If maxSplitSize is not specified, then blocks from the same rack
+ are combined in a single split; no attempt is made to create
+ node-local splits.
+ If the maxSplitSize is equal to the block size, then this class
+ is similar to the default splitting behavior in Hadoop: each
+ block is a locally processed split.
+ Subclasses implement 
+ {@link InputFormat#createRecordReader(InputSplit, TaskAttemptContext)}
+ to construct <code>RecordReader</code>'s for 
+ <code>CombineFileSplit</code>'s.
+ 
+ @see CombineFileSplit]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader -->
+  <class name="CombineFileRecordReader" extends="org.apache.hadoop.mapreduce.RecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CombineFileRecordReader" type="org.apache.hadoop.mapreduce.lib.input.CombineFileSplit, org.apache.hadoop.mapreduce.TaskAttemptContext, java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[A generic RecordReader that can hand out different recordReaders
+ for each chunk in the CombineFileSplit.]]>
+      </doc>
+    </constructor>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="nextKeyValue" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getCurrentKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getCurrentValue" return="V"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[return progress based on the amount of data processed so far.]]>
+      </doc>
+    </method>
+    <method name="initNextRecordReader" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the record reader for the next chunk in this CombineFileSplit.]]>
+      </doc>
+    </method>
+    <field name="split" type="org.apache.hadoop.mapreduce.lib.input.CombineFileSplit"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rrConstructor" type="java.lang.reflect.Constructor"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="idx" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="progress" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="curReader" type="org.apache.hadoop.mapreduce.RecordReader"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A generic RecordReader that can hand out different recordReaders
+ for each chunk in a {@link CombineFileSplit}.
+ A CombineFileSplit can combine data chunks from multiple files. 
+ This class allows using different RecordReaders for processing
+ these data chunks from different files.
+ @see CombineFileSplit]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReaderWrapper -->
+  <class name="CombineFileRecordReaderWrapper" extends="org.apache.hadoop.mapreduce.RecordReader"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CombineFileRecordReaderWrapper" type="org.apache.hadoop.mapreduce.lib.input.FileInputFormat, org.apache.hadoop.mapreduce.lib.input.CombineFileSplit, org.apache.hadoop.mapreduce.TaskAttemptContext, java.lang.Integer"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </constructor>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="nextKeyValue" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getCurrentKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getCurrentValue" return="V"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A wrapper class for a record reader that handles a single file split. It
+ delegates most of the methods to the wrapped instance. A concrete subclass
+ needs to provide a constructor that calls this parent constructor with the
+ appropriate input format. The subclass constructor must satisfy the specific
+ constructor signature that is required by
+ <code>CombineFileRecordReader</code>.
+
+ Subclassing is needed to get a concrete record reader wrapper because of the
+ constructor requirement.
+
+ @see CombineFileRecordReader
+ @see CombineFileInputFormat]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReaderWrapper -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.CombineFileSplit -->
+  <class name="CombineFileSplit" extends="org.apache.hadoop.mapreduce.InputSplit"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="CombineFileSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[default constructor]]>
+      </doc>
+    </constructor>
+    <constructor name="CombineFileSplit" type="org.apache.hadoop.fs.Path[], long[], long[], java.lang.String[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="CombineFileSplit" type="org.apache.hadoop.fs.Path[], long[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="CombineFileSplit" type="org.apache.hadoop.mapreduce.lib.input.CombineFileSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Copy constructor]]>
+      </doc>
+    </constructor>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartOffsets" return="long[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns an array containing the start offsets of the files in the split]]>
+      </doc>
+    </method>
+    <method name="getLengths" return="long[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns an array containing the lengths of the files in the split]]>
+      </doc>
+    </method>
+    <method name="getOffset" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="int"/>
+      <doc>
+      <![CDATA[Returns the start offset of the i<sup>th</sup> Path]]>
+      </doc>
+    </method>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="int"/>
+      <doc>
+      <![CDATA[Returns the length of the i<sup>th</sup> Path]]>
+      </doc>
+    </method>
+    <method name="getNumPaths" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the number of Paths in the split]]>
+      </doc>
+    </method>
+    <method name="getPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="int"/>
+      <doc>
+      <![CDATA[Returns the i<sup>th</sup> Path]]>
+      </doc>
+    </method>
+    <method name="getPaths" return="org.apache.hadoop.fs.Path[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns all the Paths in the split]]>
+      </doc>
+    </method>
+    <method name="getLocations" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns all the Paths where this input-split resides]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A sub-collection of input files. 
+ 
+ Unlike {@link FileSplit}, CombineFileSplit class does not represent 
+ a split of a file, but a split of input files into smaller sets. 
+ A split may contain blocks from different file but all 
+ the blocks in the same split are probably local to some rack <br> 
+ CombineFileSplit can be used to implement {@link RecordReader}'s, 
+ with reading one record per file.
+ 
+ @see FileSplit
+ @see CombineFileInputFormat]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.CombineFileSplit -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.CombineSequenceFileInputFormat -->
+  <class name="CombineSequenceFileInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CombineSequenceFileInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Input format that is a <code>CombineFileInputFormat</code>-equivalent for
+ <code>SequenceFileInputFormat</code>.
+
+ @see CombineFileInputFormat]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.CombineSequenceFileInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat -->
+  <class name="CombineTextInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CombineTextInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Input format that is a <code>CombineFileInputFormat</code>-equivalent for
+ <code>TextInputFormat</code>.
+
+ @see CombineFileInputFormat]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.CombineTextInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.FileInputFormat -->
+  <class name="FileInputFormat" extends="org.apache.hadoop.mapreduce.InputFormat"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setInputDirRecursive"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="inputDirRecursive" type="boolean"/>
+      <doc>
+      <![CDATA[@param job
+          the job to modify
+ @param inputDirRecursive]]>
+      </doc>
+    </method>
+    <method name="getInputDirRecursive" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[@param job
+          the job to look at.
+ @return should the files to be read recursively?]]>
+      </doc>
+    </method>
+    <method name="getFormatMinSplitSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the lower bound on split size imposed by the format.
+ @return the number of bytes of the minimal split for this format]]>
+      </doc>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <param name="filename" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Is the given filename splittable? Usually, true, but if the file is
+ stream compressed, it will not be.
+
+ The default implementation in <code>FileInputFormat</code> always returns
+ true. Implementations that may deal with non-splittable files <i>must</i>
+ override this method.
+
+ <code>FileInputFormat</code> implementations can override this and return
+ <code>false</code> to ensure that individual input files are never split-up
+ so that {@link Mapper}s process entire files.
+ 
+ @param context the job context
+ @param filename the file name to check
+ @return is this file splitable?]]>
+      </doc>
+    </method>
+    <method name="setInputPathFilter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="filter" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job.
+ @param job the job to modify
+ @param filter the PathFilter class use for filtering the input paths.]]>
+      </doc>
+    </method>
+    <method name="setMinInputSplitSize"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="size" type="long"/>
+      <doc>
+      <![CDATA[Set the minimum input split size
+ @param job the job to modify
+ @param size the minimum size]]>
+      </doc>
+    </method>
+    <method name="getMinSplitSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get the minimum split size
+ @param job the job
+ @return the minimum number of bytes that can be in a split]]>
+      </doc>
+    </method>
+    <method name="setMaxInputSplitSize"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="size" type="long"/>
+      <doc>
+      <![CDATA[Set the maximum split size
+ @param job the job to modify
+ @param size the maximum split size]]>
+      </doc>
+    </method>
+    <method name="getMaxSplitSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get the maximum split size.
+ @param context the job to look at.
+ @return the maximum number of bytes a split can include]]>
+      </doc>
+    </method>
+    <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get a PathFilter instance of the filter set for the input paths.
+
+ @return the PathFilter instance set for the job, NULL if none has been set.]]>
+      </doc>
+    </method>
+    <method name="listStatus" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[List input directories.
+ Subclasses may override to, e.g., select only files matching a regular
+ expression. 
+
+ If security is enabled, this method collects
+ delegation tokens from the input paths and adds them to the job's
+ credentials.
+ @param job the job to list input paths for and attach tokens to.
+ @return array of FileStatus objects
+ @throws IOException if zero items.]]>
+      </doc>
+    </method>
+    <method name="addInputPathRecursively"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="result" type="java.util.List"/>
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="inputFilter" type="org.apache.hadoop.fs.PathFilter"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add files in the input path recursively into the results.
+ @param result
+          The List to store all files.
+ @param fs
+          The FileSystem.
+ @param path
+          The input path.
+ @param inputFilter
+          The input filter that can be used to filter files/dirs. 
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="makeSplit" return="org.apache.hadoop.mapreduce.lib.input.FileSplit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="start" type="long"/>
+      <param name="length" type="long"/>
+      <param name="hosts" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[A factory that makes the split for this class. It can be overridden
+ by sub-classes to make sub-types]]>
+      </doc>
+    </method>
+    <method name="makeSplit" return="org.apache.hadoop.mapreduce.lib.input.FileSplit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+      <param name="start" type="long"/>
+      <param name="length" type="long"/>
+      <param name="hosts" type="java.lang.String[]"/>
+      <param name="inMemoryHosts" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[A factory that makes the split for this class. It can be overridden
+ by sub-classes to make sub-types]]>
+      </doc>
+    </method>
+    <method name="getSplits" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Generate the list of files and make them into FileSplits.
+ @param job the job context
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="computeSplitSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="blockSize" type="long"/>
+      <param name="minSize" type="long"/>
+      <param name="maxSize" type="long"/>
+    </method>
+    <method name="getBlockIndex" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/>
+      <param name="offset" type="long"/>
+    </method>
+    <method name="setInputPaths"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="commaSeparatedPaths" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Sets the given comma separated paths as the list of inputs 
+ for the map-reduce job.
+ 
+ @param job the job
+ @param commaSeparatedPaths Comma separated paths to be set as 
+        the list of inputs for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="addInputPaths"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="commaSeparatedPaths" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add the given comma separated paths to the list of inputs for
+  the map-reduce job.
+ 
+ @param job The job to modify
+ @param commaSeparatedPaths Comma separated paths to be added to
+        the list of inputs for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="setInputPaths"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the array of {@link Path}s as the list of inputs
+ for the map-reduce job.
+ 
+ @param job The job to modify 
+ @param inputPaths the {@link Path}s of the input directories/files 
+ for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="addInputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job.
+ 
+ @param job The {@link Job} to modify
+ @param path {@link Path} to be added to the list of inputs for 
+            the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get the list of input {@link Path}s for the map-reduce job.
+ 
+ @param context The job
+ @return the list of input {@link Path}s for the map-reduce job.]]>
+      </doc>
+    </method>
+    <field name="INPUT_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SPLIT_MAXSIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SPLIT_MINSIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="PATHFILTER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NUM_INPUT_FILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="INPUT_DIR_RECURSIVE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="INPUT_DIR_NONRECURSIVE_IGNORE_SUBDIRS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LIST_STATUS_NUM_THREADS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_LIST_STATUS_NUM_THREADS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A base class for file-based {@link InputFormat}s.
+
+ <p><code>FileInputFormat</code> is the base class for all file-based 
+ <code>InputFormat</code>s. This provides a generic implementation of
+ {@link #getSplits(JobContext)}.
+
+ Implementations of <code>FileInputFormat</code> can also override the
+ {@link #isSplitable(JobContext, Path)} method to prevent input files
+ from being split-up in certain situations. Implementations that may
+ deal with non-splittable files <i>must</i> override this method, since
+ the default implementation assumes splitting is always possible.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.FileInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter -->
+  <class name="FileInputFormatCounter" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.FileSplit -->
+  <class name="FileSplit" extends="org.apache.hadoop.mapreduce.InputSplit"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="FileSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a split with host information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block, possibly null]]>
+      </doc>
+    </constructor>
+    <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[], java.lang.String[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructs a split with host and cached-blocks information
+
+ @param file the file name
+ @param start the position of the first byte in the file to process
+ @param length the number of bytes in the file to process
+ @param hosts the list of hosts containing the block
+ @param inMemoryHosts the list of hosts containing the block in memory]]>
+      </doc>
+    </constructor>
+    <method name="getPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The file containing this split's data.]]>
+      </doc>
+    </method>
+    <method name="getStart" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The position of the first byte in the file to process.]]>
+      </doc>
+    </method>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of bytes in the file to process.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getLocations" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getLocationInfo" return="org.apache.hadoop.mapred.SplitLocationInfo[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[A section of an input file.  Returned by {@link
+ InputFormat#getSplits(JobContext)} and passed to
+ {@link InputFormat#createRecordReader(InputSplit,TaskAttemptContext)}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.FileSplit -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.FixedLengthInputFormat -->
+  <class name="FixedLengthInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.FileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FixedLengthInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setRecordLength"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="recordLength" type="int"/>
+      <doc>
+      <![CDATA[Set the length of each record
+ @param conf configuration
+ @param recordLength the length of a record]]>
+      </doc>
+    </method>
+    <method name="getRecordLength" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get record length value
+ @param conf configuration
+ @return the record length, zero means none was set]]>
+      </doc>
+    </method>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <field name="FIXED_RECORD_LENGTH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[FixedLengthInputFormat is an input format used to read input files
+ which contain fixed length records.  The content of a record need not be
+ text.  It can be arbitrary binary data.  Users must configure the record
+ length property by calling:
+ FixedLengthInputFormat.setRecordLength(conf, recordLength);<br><br> or
+ conf.setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH, recordLength);
+ <br><br>
+ @see FixedLengthRecordReader]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.FixedLengthInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.InvalidInputException -->
+  <class name="InvalidInputException" extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InvalidInputException" type="java.util.List"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create the exception with the given list.
+ The first element of the list is used as the init cause value.
+ @param probs the list of problems to report. this list is not copied.]]>
+      </doc>
+    </constructor>
+    <method name="getProblems" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the complete list of the problems reported.
+ @return the list of problems, which must not be modified]]>
+      </doc>
+    </method>
+    <method name="getMessage" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a summary message of the problems found.
+ @return the concatenated messages from all of the problems.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class wraps a list of problems with the input, so that the user
+ can get a list of problems together instead of finding and fixing them one 
+ by one.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.InvalidInputException -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.KeyValueLineRecordReader -->
+  <class name="KeyValueLineRecordReader" extends="org.apache.hadoop.mapreduce.RecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="KeyValueLineRecordReader" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getKeyClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericSplit" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="findSeparator" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="utf" type="byte[]"/>
+      <param name="start" type="int"/>
+      <param name="length" type="int"/>
+      <param name="sep" type="byte"/>
+    </method>
+    <method name="setKeyValue"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.io.Text"/>
+      <param name="value" type="org.apache.hadoop.io.Text"/>
+      <param name="line" type="byte[]"/>
+      <param name="lineLen" type="int"/>
+      <param name="pos" type="int"/>
+    </method>
+    <method name="nextKeyValue" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read key/value pair in a line.]]>
+      </doc>
+    </method>
+    <method name="getCurrentKey" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCurrentValue" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="KEY_VALUE_SEPARATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="KEY_VALUE_SEPERATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Use {@link #KEY_VALUE_SEPARATOR}">
+      <doc>
+      <![CDATA[@deprecated Use {@link #KEY_VALUE_SEPARATOR}]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[This class treats a line in the input as a key/value pair separated by a 
+ separator character. The separator can be specified in config file 
+ under the attribute name mapreduce.input.keyvaluelinerecordreader.key.value.separator. The default
+ separator is the tab character ('\t').]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.KeyValueLineRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat -->
+  <class name="KeyValueTextInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.FileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="KeyValueTextInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericSplit" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines.
+ Either line feed or carriage-return are used to signal end of line. 
+ Each line is divided into key and value parts by a separator byte. If no
+ such a byte exists, the key will be the entire line and value will be empty.
+ The separator byte can be specified in config file under the attribute name
+ mapreduce.input.keyvaluelinerecordreader.key.value.separator. The default
+ is the tab character ('\t').]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.MultipleInputs -->
+  <class name="MultipleInputs" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MultipleInputs"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="addInputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="inputFormatClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Add a {@link Path} with a custom {@link InputFormat} to the list of
+ inputs for the map-reduce job.
+ 
+ @param job The {@link Job}
+ @param path {@link Path} to be added to the list of inputs for the job
+ @param inputFormatClass {@link InputFormat} class to use for this path]]>
+      </doc>
+    </method>
+    <method name="addInputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="inputFormatClass" type="java.lang.Class"/>
+      <param name="mapperClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Add a {@link Path} with a custom {@link InputFormat} and
+ {@link Mapper} to the list of inputs for the map-reduce job.
+ 
+ @param job The {@link Job}
+ @param path {@link Path} to be added to the list of inputs for the job
+ @param inputFormatClass {@link InputFormat} class to use for this path
+ @param mapperClass {@link Mapper} class to use for this path]]>
+      </doc>
+    </method>
+    <field name="DIR_FORMATS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DIR_MAPPERS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class supports MapReduce jobs that have multiple input paths with
+ a different {@link InputFormat} and {@link Mapper} for each path]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.MultipleInputs -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.NLineInputFormat -->
+  <class name="NLineInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.FileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NLineInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="genericSplit" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getSplits" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Logically splits the set of input files for the job, splits N lines
+ of the input as one split.
+ 
+ @see FileInputFormat#getSplits(JobContext)]]>
+      </doc>
+    </method>
+    <method name="getSplitsForFile" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="status" type="org.apache.hadoop.fs.FileStatus"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="numLinesPerSplit" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createFileSplit" return="org.apache.hadoop.mapreduce.lib.input.FileSplit"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fileName" type="org.apache.hadoop.fs.Path"/>
+      <param name="begin" type="long"/>
+      <param name="length" type="long"/>
+      <doc>
+      <![CDATA[NLineInputFormat uses LineRecordReader, which always reads
+ (and consumes) at least one character out of its upper split
+ boundary. So to make sure that each mapper gets N lines, we
+ move back the upper split limits of each split 
+ by one character here.
+ @param fileName  Path of file
+ @param begin  the position of the first byte in the file to process
+ @param length  number of bytes in InputSplit
+ @return  FileSplit]]>
+      </doc>
+    </method>
+    <method name="setNumLinesPerSplit"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="numLines" type="int"/>
+      <doc>
+      <![CDATA[Set the number of lines per split
+ @param job the job to modify
+ @param numLines the number of lines per split]]>
+      </doc>
+    </method>
+    <method name="getNumLinesPerSplit" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get the number of lines per split
+ @param job the job
+ @return the number of lines per split]]>
+      </doc>
+    </method>
+    <field name="LINES_PER_MAP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[NLineInputFormat which splits N lines of input as one split.
+
+ In many "pleasantly" parallel applications, each process/mapper 
+ processes the same input file (s), but with computations are 
+ controlled by different parameters.(Referred to as "parameter sweeps").
+ One way to achieve this, is to specify a set of parameters 
+ (one set per line) as input in a control file 
+ (which is the input path to the map-reduce application,
+ where as the input dataset is specified 
+ via a config variable in JobConf.).
+ 
+ The NLineInputFormat can be used in such applications, that splits 
+ the input file such that by default, one line is fed as
+ a value to one map task, and key is the offset.
+ i.e. (k,v) is (LongWritable, Text).
+ The location hints will span the whole mapred cluster.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.NLineInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat -->
+  <class name="SequenceFileAsBinaryInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileAsBinaryInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[InputFormat reading keys, values from SequenceFiles in binary (raw)
+ format.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextInputFormat -->
+  <class name="SequenceFileAsTextInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileAsTextInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[This class is similar to SequenceFileInputFormat, except it generates
+ SequenceFileAsTextRecordReader which converts the input keys and values
+ to their String forms by calling toString() method.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextRecordReader -->
+  <class name="SequenceFileAsTextRecordReader" extends="org.apache.hadoop.mapreduce.RecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileAsTextRecordReader"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getCurrentKey" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getCurrentValue" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="nextKeyValue" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Read key/value pair in a line.]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[This class converts the input keys and values to their String forms by
+ calling toString() method. This class to SequenceFileAsTextInputFormat
+ class is as LineRecordReader class to TextInputFormat class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.SequenceFileAsTextRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter -->
+  <class name="SequenceFileInputFilter" extends="org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileInputFilter"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a record reader for the given split
+ @param split file split
+ @param context the task-attempt context
+ @return RecordReader]]>
+      </doc>
+    </method>
+    <method name="setFilterClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="filterClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[set the filter class
+ 
+ @param job The job
+ @param filterClass filter class]]>
+      </doc>
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILTER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILTER_FREQUENCY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILTER_REGEX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A class that allows a map/red job to work on a sample of sequence files.
+ The sample is decided by the filter class set by the job.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFilter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat -->
+  <class name="SequenceFileInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.FileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getFormatMinSplitSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="listStatus" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[An {@link InputFormat} for {@link SequenceFile}s.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader -->
+  <class name="SequenceFileRecordReader" extends="org.apache.hadoop.mapreduce.RecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileRecordReader"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="nextKeyValue" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getCurrentKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCurrentValue" return="V"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the progress within the input split
+ @return 0.0 to 1.0 of the input byte range]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="conf" type="org.apache.hadoop.conf.Configuration"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An {@link RecordReader} for {@link SequenceFile}s.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.input.TextInputFormat -->
+  <class name="TextInputFormat" extends="org.apache.hadoop.mapreduce.lib.input.FileInputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TextInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+    </method>
+    <method name="isSplitable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <param name="file" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <doc>
+    <![CDATA[An {@link InputFormat} for plain text files.  Files are broken into lines.
+ Either linefeed or carriage-return are used to signal end of line.  Keys are
+ the position in the file, and values are the line of text..]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.input.TextInputFormat -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.jobcontrol">
+  <!-- start class org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob -->
+  <class name="ControlledJob" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ControlledJob" type="org.apache.hadoop.mapreduce.Job, java.util.List"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct a job.
+ @param job a mapreduce job to be executed.
+ @param dependingJobs an array of jobs the current job depends on]]>
+      </doc>
+    </constructor>
+    <constructor name="ControlledJob" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Construct a job.
+ 
+ @param conf mapred job configuration representing a job to be executed.
+ @throws IOException]]>
+      </doc>
+    </constructor>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getJobName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the job name of this job]]>
+      </doc>
+    </method>
+    <method name="setJobName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the job name for  this job.
+ @param jobName the job name]]>
+      </doc>
+    </method>
+    <method name="getJobID" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the job ID of this job assigned by JobControl]]>
+      </doc>
+    </method>
+    <method name="setJobID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="id" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the job ID for  this job.
+ @param id the job ID]]>
+      </doc>
+    </method>
+    <method name="getMapredJobId" return="org.apache.hadoop.mapreduce.JobID"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the mapred ID of this job as assigned by the mapred framework.]]>
+      </doc>
+    </method>
+    <method name="getJob" return="org.apache.hadoop.mapreduce.Job"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the mapreduce job]]>
+      </doc>
+    </method>
+    <method name="setJob"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <doc>
+      <![CDATA[Set the mapreduce job
+ @param job the mapreduce job for this job.]]>
+      </doc>
+    </method>
+    <method name="getJobState" return="org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob.State"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the state of this job]]>
+      </doc>
+    </method>
+    <method name="setJobState"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="state" type="org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob.State"/>
+      <doc>
+      <![CDATA[Set the state for this job.
+ @param state the new state for this job.]]>
+      </doc>
+    </method>
+    <method name="getMessage" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the message of this job]]>
+      </doc>
+    </method>
+    <method name="setMessage"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="message" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the message for this job.
+ @param message the message for this job.]]>
+      </doc>
+    </method>
+    <method name="getDependentJobs" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the depending jobs of this job]]>
+      </doc>
+    </method>
+    <method name="addDependingJob" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dependingJob" type="org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob"/>
+      <doc>
+      <![CDATA[Add a job to this jobs' dependency list. 
+ Dependent jobs can only be added while a Job 
+ is waiting to run, not during or afterwards.
+ 
+ @param dependingJob Job that this Job depends on.
+ @return <tt>true</tt> if the Job was added.]]>
+      </doc>
+    </method>
+    <method name="isCompleted" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return true if this job is in a complete state]]>
+      </doc>
+    </method>
+    <method name="isReady" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return true if this job is in READY state]]>
+      </doc>
+    </method>
+    <method name="killJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="failJob"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="message" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="submit"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Submit this job to mapred. The state becomes RUNNING if submission 
+ is successful, FAILED otherwise.]]>
+      </doc>
+    </method>
+    <field name="CREATE_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class encapsulates a MapReduce job and its dependency. It monitors 
+  the states of the depending jobs and updates the state of this job.
+  A job starts in the WAITING state. If it does not have any depending jobs,
+  or all of the depending jobs are in SUCCESS state, then the job state 
+  will become READY. If any depending jobs fail, the job will fail too. 
+  When in READY state, the job can be submitted to Hadoop for execution, with
+  the state changing into RUNNING state. From RUNNING state, the job 
+  can get into SUCCESS or FAILED state, depending 
+  the status of the job execution.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl -->
+  <class name="JobControl" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Runnable"/>
+    <constructor name="JobControl" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Construct a job control for a group of jobs.
+ @param groupName a name identifying this group]]>
+      </doc>
+    </constructor>
+    <method name="getWaitingJobList" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the jobs in the waiting state]]>
+      </doc>
+    </method>
+    <method name="getRunningJobList" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the jobs in the running state]]>
+      </doc>
+    </method>
+    <method name="getReadyJobsList" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the jobs in the ready state]]>
+      </doc>
+    </method>
+    <method name="getSuccessfulJobList" return="java.util.List"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the jobs in the success state]]>
+      </doc>
+    </method>
+    <method name="getFailedJobList" return="java.util.List"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="addJob" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="aJob" type="org.apache.hadoop.mapreduce.lib.jobcontrol.ControlledJob"/>
+      <doc>
+      <![CDATA[Add a new controlled job.
+ @param aJob the new controlled job]]>
+      </doc>
+    </method>
+    <method name="addJob" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="aJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/>
+      <doc>
+      <![CDATA[Add a new job.
+ @param aJob the new job]]>
+      </doc>
+    </method>
+    <method name="addJobCollection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobs" type="java.util.Collection"/>
+      <doc>
+      <![CDATA[Add a collection of jobs
+ 
+ @param jobs]]>
+      </doc>
+    </method>
+    <method name="getThreadState" return="org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl.ThreadState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the thread state]]>
+      </doc>
+    </method>
+    <method name="stop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[set the thread state to STOPPING so that the 
+ thread will stop when it wakes up.]]>
+      </doc>
+    </method>
+    <method name="suspend"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[suspend the running thread]]>
+      </doc>
+    </method>
+    <method name="resume"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[resume the suspended thread]]>
+      </doc>
+    </method>
+    <method name="allFinished" return="boolean"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="run"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The main loop for the thread.
+  The loop does the following:
+  	Check the states of the running jobs
+  	Update the states of waiting jobs
+  	Submit the jobs in ready state]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class encapsulates a set of MapReduce jobs and its dependency.
+   
+  It tracks the states of the jobs by placing them into different tables
+  according to their states. 
+  
+  This class provides APIs for the client app to add a job to the group 
+  and to get the jobs in the group in different states. When a job is 
+  added, an ID unique to the group is assigned to the job. 
+  
+  This class has a thread that submits jobs when they become ready, 
+  monitors the states of the running jobs, and updates the states of jobs
+  based on the state changes of their depending jobs states. The class 
+  provides APIs for suspending/resuming the thread, and 
+  for stopping the thread.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.jobcontrol.JobControl -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.join">
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.ArrayListBackedIterator -->
+  <class name="ArrayListBackedIterator" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.join.ResetableIterator"/>
+    <constructor name="ArrayListBackedIterator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ArrayListBackedIterator" type="java.util.ArrayList"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="hasNext" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="X"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="replay" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="X"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="item" type="X"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="clear"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[This class provides an implementation of ResetableIterator. The
+ implementation uses an {@link java.util.ArrayList} to store elements
+ added to it, replaying them as requested.
+ Prefer {@link StreamBackedIterator}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.ArrayListBackedIterator -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.ComposableInputFormat -->
+  <class name="ComposableInputFormat" extends="org.apache.hadoop.mapreduce.InputFormat"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ComposableInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.lib.join.ComposableRecordReader"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <doc>
+    <![CDATA[Refinement of InputFormat requiring implementors to provide
+ ComposableRecordReader instead of RecordReader.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.ComposableInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.ComposableRecordReader -->
+  <class name="ComposableRecordReader" extends="org.apache.hadoop.mapreduce.RecordReader"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="ComposableRecordReader"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Additional operations required of a RecordReader to participate in a join.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.ComposableRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.CompositeInputFormat -->
+  <class name="CompositeInputFormat" extends="org.apache.hadoop.mapreduce.InputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CompositeInputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setFormat"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Interpret a given string as a composite expression.
+ {@code
+   func  ::= <ident>([<func>,]*<func>)
+   func  ::= tbl(<class>,"<path>")
+   class ::= @see java.lang.Class#forName(java.lang.String)
+   path  ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String)
+ }
+ Reads expression from the <tt>mapreduce.join.expr</tt> property and
+ user-supplied join types from <tt>mapreduce.join.define.&lt;ident&gt;</tt>
+  types. Paths supplied to <tt>tbl</tt> are given as input paths to the
+ InputFormat class listed.
+ @see #compose(java.lang.String, java.lang.Class, java.lang.String...)]]>
+      </doc>
+    </method>
+    <method name="addDefaults"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Adds the default set of identifiers to the parser.]]>
+      </doc>
+    </method>
+    <method name="getSplits" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Build a CompositeInputSplit from the child InputFormats by assigning the
+ ith split from each child to the ith composite split.]]>
+      </doc>
+    </method>
+    <method name="createRecordReader" return="org.apache.hadoop.mapreduce.RecordReader"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Construct a CompositeRecordReader for the children of this InputFormat
+ as defined in the init expression.
+ The outermost join need only be composable, not necessarily a composite.
+ Mandating TupleWritable isn't strictly correct.]]>
+      </doc>
+    </method>
+    <method name="compose" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="inf" type="java.lang.Class"/>
+      <param name="path" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Convenience method for constructing composite formats.
+ Given InputFormat class (inf), path (p) return:
+ {@code tbl(<inf>, <p>) }]]>
+      </doc>
+    </method>
+    <method name="compose" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="op" type="java.lang.String"/>
+      <param name="inf" type="java.lang.Class"/>
+      <param name="path" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), Object class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+      </doc>
+    </method>
+    <method name="compose" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="op" type="java.lang.String"/>
+      <param name="inf" type="java.lang.Class"/>
+      <param name="path" type="org.apache.hadoop.fs.Path[]"/>
+      <doc>
+      <![CDATA[Convenience method for constructing composite formats.
+ Given operation (op), Object class (inf), set of paths (p) return:
+ {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]>
+      </doc>
+    </method>
+    <field name="JOIN_EXPR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="JOIN_COMPARATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An InputFormat capable of performing joins over a set of data sources sorted
+ and partitioned the same way.
+
+ A user may define new join types by setting the property
+ <tt>mapreduce.join.define.&lt;ident&gt;</tt> to a classname. 
+ In the expression <tt>mapreduce.join.expr</tt>, the identifier will be
+ assumed to be a ComposableRecordReader.
+ <tt>mapreduce.join.keycomparator</tt> can be a classname used to compare 
+ keys in the join.
+ @see #setFormat
+ @see JoinRecordReader
+ @see MultiFilterRecordReader]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.CompositeInputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.CompositeInputSplit -->
+  <class name="CompositeInputSplit" extends="org.apache.hadoop.mapreduce.InputSplit"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="CompositeInputSplit"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="CompositeInputSplit" type="int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="s" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Add an InputSplit to this collection.
+ @throws IOException If capacity was not specified during construction
+                     or if capacity has been reached.]]>
+      </doc>
+    </method>
+    <method name="get" return="org.apache.hadoop.mapreduce.InputSplit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="int"/>
+      <doc>
+      <![CDATA[Get ith child InputSplit.]]>
+      </doc>
+    </method>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return the aggregate length of all child InputSplits currently added.]]>
+      </doc>
+    </method>
+    <method name="getLength" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the length of ith child InputSplit.]]>
+      </doc>
+    </method>
+    <method name="getLocations" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Collect a set of hosts from all child InputSplits.]]>
+      </doc>
+    </method>
+    <method name="getLocation" return="java.lang.String[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="int"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[getLocations from ith InputSplit.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write splits in the following format.
+ {@code
+ <count><class1><class2>...<classn><split1><split2>...<splitn>
+ }]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}
+ @throws IOException If the child InputSplit cannot be read, typically
+                     for failing access checks.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This InputSplit contains a set of child InputSplits. Any InputSplit inserted
+ into this collection must have a public default constructor.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.CompositeInputSplit -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.CompositeRecordReader -->
+  <class name="CompositeRecordReader" extends="org.apache.hadoop.mapreduce.lib.join.ComposableRecordReader"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="CompositeRecordReader" type="int, int, java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a RecordReader with <tt>capacity</tt> children to position
+ <tt>id</tt> in the parent reader.
+ The id of a root CompositeRecordReader is -1 by convention, but relying
+ on this is not recommended.]]>
+      </doc>
+    </constructor>
+    <method name="combine" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="srcs" type="java.lang.Object[]"/>
+      <param name="value" type="org.apache.hadoop.mapreduce.lib.join.TupleWritable"/>
+    </method>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="id" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the position in the collector this class occupies.]]>
+      </doc>
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="getRecordReaderQueue" return="java.util.PriorityQueue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return sorted list of RecordReaders for this composite.]]>
+      </doc>
+    </method>
+    <method name="getComparator" return="org.apache.hadoop.io.WritableComparator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return comparator defining the ordering for RecordReaders in this
+ composite.]]>
+      </doc>
+    </method>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="rr" type="org.apache.hadoop.mapreduce.lib.join.ComposableRecordReader"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Add a RecordReader to this collection.
+ The id() of a RecordReader determines where in the Tuple its
+ entry will appear. Adding RecordReaders with the same id has
+ undefined behavior.]]>
+      </doc>
+    </method>
+    <method name="key" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the key for the current join or the value at the top of the
+ RecordReader heap.]]>
+      </doc>
+    </method>
+    <method name="key"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Clone the key at the top of this RR into the given object.]]>
+      </doc>
+    </method>
+    <method name="getCurrentKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hasNext" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return true if it is possible that this could emit more values.]]>
+      </doc>
+    </method>
+    <method name="skip"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Pass skip key to child RRs.]]>
+      </doc>
+    </method>
+    <method name="getDelegate" return="org.apache.hadoop.mapreduce.lib.join.ResetableIterator"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Obtain an iterator over the child RRs apropos of the value type
+ ultimately emitted from this join.]]>
+      </doc>
+    </method>
+    <method name="accept"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jc" type="org.apache.hadoop.mapreduce.lib.join.CompositeRecordReader.JoinCollector"/>
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[If key provided matches that of this Composite, give JoinCollector
+ iterator over values it may emit.]]>
+      </doc>
+    </method>
+    <method name="fillJoinCollector"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="iterkey" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[For all child RRs offering the key provided, obtain an iterator
+ at that position in the JoinCollector.]]>
+      </doc>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.mapreduce.lib.join.ComposableRecordReader"/>
+      <doc>
+      <![CDATA[Implement Comparable contract (compare key of join or head of heap
+ with that of another).]]>
+      </doc>
+    </method>
+    <method name="createKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new key common to all child RRs.
+ @throws ClassCastException if key classes differ.]]>
+      </doc>
+    </method>
+    <method name="createTupleWritable" return="org.apache.hadoop.mapreduce.lib.join.TupleWritable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a value to be used internally for joins.]]>
+      </doc>
+    </method>
+    <method name="getCurrentValue" return="X"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close all child RRs.]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Report progress as the minimum of all child RR progress.]]>
+      </doc>
+    </method>
+    <field name="conf" type="org.apache.hadoop.conf.Configuration"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="keyclass" type="java.lang.Class"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="jc" type="org.apache.hadoop.mapreduce.lib.join.CompositeRecordReader.JoinCollector"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="kids" type="org.apache.hadoop.mapreduce.lib.join.ComposableRecordReader[]"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="key" type="K"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="value" type="X"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A RecordReader that can effect joins of RecordReaders sharing a common key
+ type and partitioning.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.CompositeRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.InnerJoinRecordReader -->
+  <class name="InnerJoinRecordReader" extends="org.apache.hadoop.mapreduce.lib.join.JoinRecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="combine" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="srcs" type="java.lang.Object[]"/>
+      <param name="dst" type="org.apache.hadoop.mapreduce.lib.join.TupleWritable"/>
+      <doc>
+      <![CDATA[Return true iff the tuple is full (all data sources contain this key).]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Full inner join.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.InnerJoinRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.JoinRecordReader -->
+  <class name="JoinRecordReader" extends="org.apache.hadoop.mapreduce.lib.join.CompositeRecordReader"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="JoinRecordReader" type="int, org.apache.hadoop.conf.Configuration, int, java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="nextKeyValue" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Emit the next set of key, value pairs as defined by the child
+ RecordReaders and operation associated with this composite RR.]]>
+      </doc>
+    </method>
+    <method name="createValue" return="org.apache.hadoop.mapreduce.lib.join.TupleWritable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDelegate" return="org.apache.hadoop.mapreduce.lib.join.ResetableIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return an iterator wrapping the JoinCollector.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Base class for Composite joins returning Tuples of arbitrary Writables.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.JoinRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.MultiFilterRecordReader -->
+  <class name="MultiFilterRecordReader" extends="org.apache.hadoop.mapreduce.lib.join.CompositeRecordReader"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MultiFilterRecordReader" type="int, org.apache.hadoop.conf.Configuration, int, java.lang.Class"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="emit" return="V"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="dst" type="org.apache.hadoop.mapreduce.lib.join.TupleWritable"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[For each tuple emitted, return a value (typically one of the values
+ in the tuple).
+ Modifying the Writables in the tuple is permitted and unlikely to affect
+ join behavior in most cases, but it is not recommended. It's safer to
+ clone first.]]>
+      </doc>
+    </method>
+    <method name="combine" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="srcs" type="java.lang.Object[]"/>
+      <param name="dst" type="org.apache.hadoop.mapreduce.lib.join.TupleWritable"/>
+      <doc>
+      <![CDATA[Default implementation offers {@link #emit} every Tuple from the
+ collector (the outer join of child RRs).]]>
+      </doc>
+    </method>
+    <method name="nextKeyValue" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getDelegate" return="org.apache.hadoop.mapreduce.lib.join.ResetableIterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return an iterator returning a single value from the tuple.
+ @see MultiFilterDelegationIterator]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Base class for Composite join returning values derived from multiple
+ sources, but generally not tuples.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.MultiFilterRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.OuterJoinRecordReader -->
+  <class name="OuterJoinRecordReader" extends="org.apache.hadoop.mapreduce.lib.join.JoinRecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="combine" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="srcs" type="java.lang.Object[]"/>
+      <param name="dst" type="org.apache.hadoop.mapreduce.lib.join.TupleWritable"/>
+      <doc>
+      <![CDATA[Emit everything from the collector.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Full outer join.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.OuterJoinRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.OverrideRecordReader -->
+  <class name="OverrideRecordReader" extends="org.apache.hadoop.mapreduce.lib.join.MultiFilterRecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="emit" return="V"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="dst" type="org.apache.hadoop.mapreduce.lib.join.TupleWritable"/>
+      <doc>
+      <![CDATA[Emit the value with the highest position in the tuple.]]>
+      </doc>
+    </method>
+    <method name="createValue" return="V"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="fillJoinCollector"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="iterkey" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Instead of filling the JoinCollector with iterators from all
+ data sources, fill only the rightmost for this key.
+ This not only saves space by discarding the other sources, but
+ it also emits the number of key-value pairs in the preferred
+ RecordReader instead of repeating that stream n times, where
+ n is the cardinality of the cross product of the discarded
+ streams for the given key.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Prefer the &quot;rightmost&quot; data source for this key.
+ For example, <tt>override(S1,S2,S3)</tt> will prefer values
+ from S3 over S2, and values from S2 over S1 for all keys
+ emitted from all sources.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.OverrideRecordReader -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.Parser -->
+  <class name="Parser" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Parser"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Very simple shift-reduce parser for join expressions.
+
+ This should be sufficient for the user extension permitted now, but ought to
+ be replaced with a parser generator if more complex grammars are supported.
+ In particular, this &quot;shift-reduce&quot; parser has no states. Each set
+ of formals requires a different internal node type, which is responsible for
+ interpreting the list of tokens it receives. This is sufficient for the
+ current grammar, but it has several annoying properties that might inhibit
+ extension. In particular, parenthesis are always function calls; an
+ algebraic or filter grammar would not only require a node type, but must
+ also work around the internals of this parser.
+
+ For most other cases, adding classes to the hierarchy- particularly by
+ extending JoinRecordReader and MultiFilterRecordReader- is fairly
+ straightforward. One need only override the relevant method(s) (usually only
+ {@link CompositeRecordReader#combine}) and include a property to map its
+ value to an identifier in the parser.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.Parser -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.Parser.Node -->
+  <class name="Parser.Node" extends="org.apache.hadoop.mapreduce.lib.join.ComposableInputFormat"
+    abstract="true"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Node" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="addIdentifier"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="ident" type="java.lang.String"/>
+      <param name="mcstrSig" type="java.lang.Class[]"/>
+      <param name="nodetype" type="java.lang.Class"/>
+      <param name="cl" type="java.lang.Class"/>
+      <exception name="NoSuchMethodException" type="java.lang.NoSuchMethodException"/>
+      <doc>
+      <![CDATA[For a given identifier, add a mapping to the nodetype for the parse
+ tree and to the ComposableRecordReader to be created, including the
+ formals required to invoke the constructor.
+ The nodetype and constructor signature should be filled in from the
+ child node.]]>
+      </doc>
+    </method>
+    <method name="setID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="id" type="int"/>
+    </method>
+    <method name="setKeyComparator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="cmpcl" type="java.lang.Class"/>
+    </method>
+    <field name="rrCstrMap" type="java.util.Map"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="id" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="ident" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="cmpcl" type="java.lang.Class"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.Parser.Node -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.Parser.NodeToken -->
+  <class name="Parser.NodeToken" extends="org.apache.hadoop.mapreduce.lib.join.Parser.Token"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getNode" return="org.apache.hadoop.mapreduce.lib.join.Parser.Node"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.Parser.NodeToken -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.Parser.NumToken -->
+  <class name="Parser.NumToken" extends="org.apache.hadoop.mapreduce.lib.join.Parser.Token"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NumToken" type="double"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNum" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.Parser.NumToken -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.Parser.StrToken -->
+  <class name="Parser.StrToken" extends="org.apache.hadoop.mapreduce.lib.join.Parser.Token"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StrToken" type="org.apache.hadoop.mapreduce.lib.join.Parser.TType, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getStr" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.Parser.StrToken -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.Parser.Token -->
+  <class name="Parser.Token" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getType" return="org.apache.hadoop.mapreduce.lib.join.Parser.TType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNode" return="org.apache.hadoop.mapreduce.lib.join.Parser.Node"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getNum" return="double"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getStr" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Tagged-union type for tokens from the join expression.
+ @see Parser.TType]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.Parser.Token -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.Parser.TType -->
+  <class name="Parser.TType" extends="java.lang.Enum"
+    abstract="false"
+    static="true" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.mapreduce.lib.join.Parser.TType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.mapreduce.lib.join.Parser.TType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.Parser.TType -->
+  <!-- start interface org.apache.hadoop.mapreduce.lib.join.ResetableIterator -->
+  <interface name="ResetableIterator"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="hasNext" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[True if a call to next may return a value. This is permitted false
+ positives, but not false negatives.]]>
+      </doc>
+    </method>
+    <method name="next" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="T"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Assign next value to actual.
+ It is required that elements added to a ResetableIterator be returned in
+ the same order after a call to {@link #reset} (FIFO).
+
+ Note that a call to this may fail for nested joins (i.e. more elements
+ available, but none satisfying the constraints of the join)]]>
+      </doc>
+    </method>
+    <method name="replay" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="T"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Assign last value returned to actual.]]>
+      </doc>
+    </method>
+    <method name="reset"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Set iterator to return to the start of its range. Must be called after
+ calling {@link #add} to avoid a ConcurrentModificationException.]]>
+      </doc>
+    </method>
+    <method name="add"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="item" type="T"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Add an element to the collection of elements to iterate over.]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Close datasources and release resources. Calling methods on the iterator
+ after calling close has undefined behavior.]]>
+      </doc>
+    </method>
+    <method name="clear"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Close datasources, but do not release internal resources. Calling this
+ method should permit the object to be reused with a different datasource.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This defines an interface to a stateful Iterator that can replay elements
+ added to it directly.
+ Note that this does not extend {@link java.util.Iterator}.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.lib.join.ResetableIterator -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.StreamBackedIterator -->
+  <class name="StreamBackedIterator" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.join.ResetableIterator"/>
+    <constructor name="StreamBackedIterator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="hasNext" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="next" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="X"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="replay" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="val" type="X"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="reset"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="item" type="X"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="clear"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[This class provides an implementation of ResetableIterator. This
+ implementation uses a byte array to store elements added to it.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.StreamBackedIterator -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.TupleWritable -->
+  <class name="TupleWritable" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <implements name="java.lang.Iterable"/>
+    <constructor name="TupleWritable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an empty tuple with no allocated storage for writables.]]>
+      </doc>
+    </constructor>
+    <constructor name="TupleWritable" type="org.apache.hadoop.io.Writable[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initialize tuple with storage; unknown whether any of them contain
+ &quot;written&quot; values.]]>
+      </doc>
+    </constructor>
+    <method name="has" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="int"/>
+      <doc>
+      <![CDATA[Return true if tuple has an element at the position provided.]]>
+      </doc>
+    </method>
+    <method name="get" return="org.apache.hadoop.io.Writable"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="int"/>
+      <doc>
+      <![CDATA[Get ith Writable from Tuple.]]>
+      </doc>
+    </method>
+    <method name="size" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of children in this Tuple.]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="iterator" return="java.util.Iterator"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return an iterator over the elements in this tuple.
+ Note that this doesn't flatten the tuple; one may receive tuples
+ from this iterator.]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Convert Tuple to String as in the following.
+ <tt>[&lt;child1&gt;,&lt;child2&gt;,...,&lt;childn&gt;]</tt>]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes each Writable to <code>out</code>.
+ TupleWritable format:
+ {@code
+  <count><type1><type2>...<typen><obj1><obj2>...<objn>
+ }]]>
+      </doc>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <field name="written" type="java.util.BitSet"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Writable type storing multiple {@link org.apache.hadoop.io.Writable}s.
+
+ This is *not* a general-purpose tuple type. In almost all cases, users are
+ encouraged to implement their own serializable types, which can perform
+ better validation and provide more efficient encodings than this class is
+ capable of. TupleWritable relies on the join framework for type safety and
+ assumes its instances will rarely be persisted, assumptions not only
+ incompatible with, but contrary to the general case.
+
+ @see org.apache.hadoop.io.Writable]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.TupleWritable -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.join.WrappedRecordReader -->
+  <class name="WrappedRecordReader" extends="org.apache.hadoop.mapreduce.lib.join.ComposableRecordReader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="WrappedRecordReader" type="int"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="split" type="org.apache.hadoop.mapreduce.InputSplit"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="createKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Request new key from proxied RR.]]>
+      </doc>
+    </method>
+    <method name="createValue" return="U"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="id" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[{@inheritDoc}]]>
+      </doc>
+    </method>
+    <method name="key" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return the key at the head of this RR.]]>
+      </doc>
+    </method>
+    <method name="key"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="qkey" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Clone the key at the head of this RR into the object supplied.]]>
+      </doc>
+    </method>
+    <method name="hasNext" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return true if the RR- including the k,v pair stored in this object-
+ is exhausted.]]>
+      </doc>
+    </method>
+    <method name="skip"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]>
+      </doc>
+    </method>
+    <method name="accept"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="i" type="org.apache.hadoop.mapreduce.lib.join.CompositeRecordReader.JoinCollector"/>
+      <param name="key" type="K"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Add an iterator to the collector at the position occupied by this
+ RecordReader over the values in this stream paired with the key
+ provided (ie register a stream of values from this source matching K
+ with a collector).]]>
+      </doc>
+    </method>
+    <method name="nextKeyValue" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Read the next k,v pair into the head of this object; return true iff
+ the RR and this are exhausted.]]>
+      </doc>
+    </method>
+    <method name="getCurrentKey" return="K"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get current key]]>
+      </doc>
+    </method>
+    <method name="getCurrentValue" return="U"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get current value]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Request progress from proxied RR.]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Forward close request to proxied RR.]]>
+      </doc>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.mapreduce.lib.join.ComposableRecordReader"/>
+      <doc>
+      <![CDATA[Implement Comparable contract (compare key at head of proxied RR
+ with that of another).]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Return true iff compareTo(other) returns 0.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="empty" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="cmp" type="org.apache.hadoop.io.WritableComparator"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Proxy class for a RecordReader participating in the join framework.
+ 
+ This class keeps track of the &quot;head&quot; key-value pair for the
+ provided RecordReader and keeps a store of values matching a key when
+ this source is participating in a join.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.join.WrappedRecordReader -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.map">
+  <!-- start class org.apache.hadoop.mapreduce.lib.map.InverseMapper -->
+  <class name="InverseMapper" extends="org.apache.hadoop.mapreduce.Mapper"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InverseMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[The inverse function.  Input keys and values are swapped.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A {@link Mapper} that swaps keys and values.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.map.InverseMapper -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.map.MultithreadedMapper -->
+  <class name="MultithreadedMapper" extends="org.apache.hadoop.mapreduce.Mapper"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MultithreadedMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNumberOfThreads" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[The number of threads in the thread pool that will run the map function.
+ @param job the job
+ @return the number of threads]]>
+      </doc>
+    </method>
+    <method name="setNumberOfThreads"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="threads" type="int"/>
+      <doc>
+      <![CDATA[Set the number of threads in the pool for running maps.
+ @param job the job to modify
+ @param threads the new number of threads]]>
+      </doc>
+    </method>
+    <method name="getMapperClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get the application's mapper class.
+ @param <K1> the map's input key type
+ @param <V1> the map's input value type
+ @param <K2> the map's output key type
+ @param <V2> the map's output value type
+ @param job the job
+ @return the mapper class to run]]>
+      </doc>
+    </method>
+    <method name="setMapperClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="cls" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the application's mapper class.
+ @param <K1> the map input key type
+ @param <V1> the map input value type
+ @param <K2> the map output key type
+ @param <V2> the map output value type
+ @param job the job to modify
+ @param cls the class to use as the mapper]]>
+      </doc>
+    </method>
+    <method name="run"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Run the application's maps using a thread pool.]]>
+      </doc>
+    </method>
+    <field name="NUM_THREADS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAP_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Multithreaded implementation for {@link org.apache.hadoop.mapreduce.Mapper}.
+ <p>
+ It can be used instead of the default implementation,
+ {@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not CPU
+ bound in order to improve throughput.
+ <p>
+ Mapper implementations using this MapRunnable must be thread-safe.
+ <p>
+ The Map-Reduce job has to be configured with the mapper to use via 
+ {@link #setMapperClass(Job, Class)} and
+ the number of threads the thread-pool can use with the
+ {@link #setNumberOfThreads(Job, int)} method. The default
+ value is 10 threads.
+ <p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.map.MultithreadedMapper -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.map.RegexMapper -->
+  <class name="RegexMapper" extends="org.apache.hadoop.mapreduce.Mapper"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RegexMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setup"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+    </method>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="org.apache.hadoop.io.Text"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <field name="PATTERN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="GROUP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.map.RegexMapper -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper -->
+  <class name="TokenCounterMapper" extends="org.apache.hadoop.mapreduce.Mapper"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TokenCounterMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.Object"/>
+      <param name="value" type="org.apache.hadoop.io.Text"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.Mapper.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <doc>
+    <![CDATA[Tokenize the input values and emit each word with a count of 1.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.map.TokenCounterMapper -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.map.WrappedMapper -->
+  <class name="WrappedMapper" extends="org.apache.hadoop.mapreduce.Mapper"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="WrappedMapper"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getMapContext" return="org.apache.hadoop.mapreduce.Mapper.Context"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="mapContext" type="org.apache.hadoop.mapreduce.MapContext"/>
+      <doc>
+      <![CDATA[Get a wrapped {@link Mapper.Context} for custom implementations.
+ @param mapContext <code>MapContext</code> to be wrapped
+ @return a wrapped <code>Mapper.Context</code> for custom implementations]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A {@link Mapper} which wraps a given one to allow custom 
+ {@link Mapper.Context} implementations.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.map.WrappedMapper -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.output">
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.BindingPathOutputCommitter -->
+  <class name="BindingPathOutputCommitter" extends="org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BindingPathOutputCommitter" type="org.apache.hadoop.fs.Path, org.apache.hadoop.mapreduce.TaskAttemptContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Instantiate.
+ @param outputPath output path (may be null)
+ @param context task context
+ @throws IOException on any failure.]]>
+      </doc>
+    </constructor>
+    <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getWorkPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setupTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="needsTaskCommit" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="commitTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="abortTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="cleanupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="commitJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="abortJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <param name="state" type="org.apache.hadoop.mapreduce.JobStatus.State"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isCommitJobRepeatable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobContext" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="recoverTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="taskContext" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="hasOutputPath" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCommitter" return="org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the inner committer.
+ @return the bonded committer.]]>
+      </doc>
+    </method>
+    <field name="NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The classname for use in configurations.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[This is a special committer which creates the factory for the committer and
+ runs off that. Why does it exist? So that you can explicitly instantiate
+ a committer by classname and yet still have the actual implementation
+ driven dynamically by the factory options and destination filesystem.
+ This simplifies integration
+ with existing code which takes the classname of a committer.
+ There's no factory for this, as that would lead to a loop.
+
+ All commit protocol methods and accessors are delegated to the
+ wrapped committer.
+
+ How to use:
+
+ <ol>
+   <li>
+     In applications which take a classname of committer in
+     a configuration option, set it to the canonical name of this class
+     (see {@link #NAME}). When this class is instantiated, it will
+     use the factory mechanism to locate the configured committer for the
+     destination.
+   </li>
+   <li>
+     In code, explicitly create an instance of this committer through
+     its constructor, then invoke commit lifecycle operations on it.
+     The dynamically configured committer will be created in the constructor
+     and have the lifecycle operations relayed to it.
+   </li>
+ </ol>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.BindingPathOutputCommitter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter -->
+  <class name="FileOutputCommitter" extends="org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileOutputCommitter" type="org.apache.hadoop.fs.Path, org.apache.hadoop.mapreduce.TaskAttemptContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a file output committer
+ @param outputPath the job's output path, or null if you want the output
+ committer to act as a noop.
+ @param context the task's context
+ @throws IOException]]>
+      </doc>
+    </constructor>
+    <constructor name="FileOutputCommitter" type="org.apache.hadoop.fs.Path, org.apache.hadoop.mapreduce.JobContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a file output committer
+ @param outputPath the job's output path, or null if you want the output
+ committer to act as a noop.
+ @param context the task's context
+ @throws IOException]]>
+      </doc>
+    </constructor>
+    <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the path where final output of the job should be placed.  This
+ could also be considered the committed application attempt path.]]>
+      </doc>
+    </method>
+    <method name="getJobAttemptPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Compute the path where the output of a given job attempt will be placed. 
+ @param context the context of the job.  This is used to get the
+ application attempt id.
+ @return the path to store job attempt data.]]>
+      </doc>
+    </method>
+    <method name="getJobAttemptPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <param name="out" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Compute the path where the output of a given job attempt will be placed. 
+ @param context the context of the job.  This is used to get the
+ application attempt id.
+ @param out the output path to place these in.
+ @return the path to store job attempt data.]]>
+      </doc>
+    </method>
+    <method name="getJobAttemptPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="int"/>
+      <doc>
+      <![CDATA[Compute the path where the output of a given job attempt will be placed. 
+ @param appAttemptId the ID of the application attempt for this job.
+ @return the path to store job attempt data.]]>
+      </doc>
+    </method>
+    <method name="getTaskAttemptPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <doc>
+      <![CDATA[Compute the path where the output of a task attempt is stored until
+ that task is committed.
+ 
+ @param context the context of the task attempt.
+ @return the path where a task attempt should be stored.]]>
+      </doc>
+    </method>
+    <method name="getTaskAttemptPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <param name="out" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Compute the path where the output of a task attempt is stored until
+ that task is committed.
+ 
+ @param context the context of the task attempt.
+ @param out The output path to put things in.
+ @return the path where a task attempt should be stored.]]>
+      </doc>
+    </method>
+    <method name="getCommittedTaskPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <doc>
+      <![CDATA[Compute the path where the output of a committed task is stored until
+ the entire job is committed.
+ @param context the context of the task attempt
+ @return the path where the output of a committed task is stored until
+ the entire job is committed.]]>
+      </doc>
+    </method>
+    <method name="getCommittedTaskPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <param name="out" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="getCommittedTaskPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="int"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <doc>
+      <![CDATA[Compute the path where the output of a committed task is stored until the
+ entire job is committed for a specific application attempt.
+ @param appAttemptId the id of the application attempt to use
+ @param context the context of any task.
+ @return the path where the output of a committed task is stored.]]>
+      </doc>
+    </method>
+    <method name="getWorkPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the directory that the task should write results into.
+ @return the work directory
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="setupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create the temporary directory that is the root of all of the task 
+ work directories.
+ @param context the job's context]]>
+      </doc>
+    </method>
+    <method name="commitJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The job has completed, so do works in commitJobInternal().
+ Could retry on failure if using algorithm 2.
+ @param context the job's context]]>
+      </doc>
+    </method>
+    <method name="commitJobInternal"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[The job has completed, so do following commit job, include:
+ Move all committed tasks to the final output dir (algorithm 1 only).
+ Delete the temporary directory, including all of the work directories.
+ Create a _SUCCESS file to make it as successful.
+ @param context the job's context]]>
+      </doc>
+    </method>
+    <method name="cleanupJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="abortJob"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <param name="state" type="org.apache.hadoop.mapreduce.JobStatus.State"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete the temporary directory, including all of the work directories.
+ @param context the job's context]]>
+      </doc>
+    </method>
+    <method name="setupTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[No task setup required.]]>
+      </doc>
+    </method>
+    <method name="commitTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Move the files from the work directory to the job output directory
+ @param context the task context]]>
+      </doc>
+    </method>
+    <method name="abortTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Delete the work directory
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="needsTaskCommit" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Did this task write any files in the work directory?
+ @param context the task's context]]>
+      </doc>
+    </method>
+    <method name="isRecoverySupported" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isCommitJobRepeatable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="recoverTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="PENDING_DIR_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Name of directory where pending data is placed.  Data that has not been
+ committed yet.]]>
+      </doc>
+    </field>
+    <field name="TEMP_DIR_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Temporary directory name 
+
+ The static variable to be compatible with M/R 1.x]]>
+      </doc>
+    </field>
+    <field name="SUCCEEDED_FILE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SUCCESSFUL_JOB_OUTPUT_DIR_MARKER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILEOUTPUTCOMMITTER_ALGORITHM_VERSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILEOUTPUTCOMMITTER_CLEANUP_SKIPPED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILEOUTPUTCOMMITTER_CLEANUP_SKIPPED_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILEOUTPUTCOMMITTER_CLEANUP_FAILURES_IGNORED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILEOUTPUTCOMMITTER_CLEANUP_FAILURES_IGNORED_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILEOUTPUTCOMMITTER_FAILURE_ATTEMPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILEOUTPUTCOMMITTER_FAILURE_ATTEMPTS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILEOUTPUTCOMMITTER_TASK_CLEANUP_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILEOUTPUTCOMMITTER_TASK_CLEANUP_ENABLED_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An {@link OutputCommitter} that commits files specified 
+ in job output directory i.e. ${mapreduce.output.fileoutputformat.outputdir}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.FileOutputFormat -->
+  <class name="FileOutputFormat" extends="org.apache.hadoop.mapreduce.OutputFormat"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FileOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setCompressOutput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="compress" type="boolean"/>
+      <doc>
+      <![CDATA[Set whether the output of the job is compressed.
+ @param job the job to modify
+ @param compress should the output of the job be compressed?]]>
+      </doc>
+    </method>
+    <method name="getCompressOutput" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Is the job output compressed?
+ @param job the Job to look in
+ @return <code>true</code> if the job output should be compressed,
+         <code>false</code> otherwise]]>
+      </doc>
+    </method>
+    <method name="setOutputCompressorClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="codecClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs.
+ @param job the job to modify
+ @param codecClass the {@link CompressionCodec} to be used to
+                   compress the job outputs]]>
+      </doc>
+    </method>
+    <method name="getOutputCompressorClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <param name="defaultValue" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs.
+ @param job the {@link Job} to look in
+ @param defaultValue the {@link CompressionCodec} to return if not set
+ @return the {@link CompressionCodec} to be used to compress the 
+         job outputs
+ @throws IllegalArgumentException if the class was specified, but not found]]>
+      </doc>
+    </method>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="setOutputPath"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="outputDir" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Set the {@link Path} of the output directory for the map-reduce job.
+
+ @param job The job to modify
+ @param outputDir the {@link Path} of the output directory for 
+ the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get the {@link Path} to the output directory for the map-reduce job.
+ 
+ @return the {@link Path} to the output directory for the map-reduce job.
+ @see FileOutputFormat#getWorkOutputPath(TaskInputOutputContext)]]>
+      </doc>
+    </method>
+    <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskInputOutputContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Get the {@link Path} to the task's temporary output directory 
+  for the map-reduce job
+  
+ <b id="SideEffectFiles">Tasks' Side-Effect Files</b>
+ 
+ <p>Some applications need to create/write-to side-files, which differ from
+ the actual job-outputs.
+ 
+ <p>In such cases there could be issues with 2 instances of the same TIP 
+ (running simultaneously e.g. speculative tasks) trying to open/write-to the
+ same file (path) on HDFS. Hence the application-writer will have to pick 
+ unique names per task-attempt (e.g. using the attemptid, say 
+ <tt>attempt_200709221812_0001_m_000000_0</tt>), not just per TIP.</p> 
+ 
+ <p>To get around this the Map-Reduce framework helps the application-writer 
+ out by maintaining a special 
+ <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> 
+ sub-directory for each task-attempt on HDFS where the output of the 
+ task-attempt goes. On successful completion of the task-attempt the files 
+ in the <tt>${mapreduce.output.fileoutputformat.outputdir}/_temporary/_${taskid}</tt> (only) 
+ are <i>promoted</i> to <tt>${mapreduce.output.fileoutputformat.outputdir}</tt>. Of course, the 
+ framework discards the sub-directory of unsuccessful task-attempts. This 
+ is completely transparent to the application.</p>
+ 
+ <p>The application-writer can take advantage of this by creating any 
+ side-files required in a work directory during execution 
+ of his task i.e. via 
+ {@link #getWorkOutputPath(TaskInputOutputContext)}, and
+ the framework will move them out similarly - thus she doesn't have to pick 
+ unique paths per task-attempt.</p>
+ 
+ <p>The entire discussion holds true for maps of jobs with 
+ reducer=NONE (i.e. 0 reduces) since output of the map, in that case, 
+ goes directly to HDFS.</p> 
+ 
+ @return the {@link Path} to the task's temporary output directory 
+ for the map-reduce job.]]>
+      </doc>
+    </method>
+    <method name="getPathForWorkFile" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskInputOutputContext"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="extension" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Helper function to generate a {@link Path} for a file that is unique for
+ the task within the job output directory.
+
+ <p>The path can be used to create custom files from within the map and
+ reduce tasks. The path name will be unique for each task. The path parent
+ will be the job output directory.</p>
+
+ <p>This method uses the {@link #getUniqueFile} method to make the file name
+ unique for the task.</p>
+
+ @param context the context for the task.
+ @param name the name for the file.
+ @param extension the extension for the file
+ @return a unique path across all tasks of the job.]]>
+      </doc>
+    </method>
+    <method name="getUniqueFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="true"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="extension" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Generate a unique filename, based on the task id, name, and extension
+ @param context the task that is calling this
+ @param name the base filename
+ @param extension the filename extension
+ @return a string like $name-[mrsct]-$id$extension]]>
+      </doc>
+    </method>
+    <method name="getDefaultWorkFile" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <param name="extension" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the default path and filename for the output format.
+ @param context the task context
+ @param extension an extension to add to the filename
+ @return a full path $output/_temporary/$taskid/part-[mr]-$id
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getOutputName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get the base output name for the output file.]]>
+      </doc>
+    </method>
+    <method name="setOutputName"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the base output name for output file to be created.]]>
+      </doc>
+    </method>
+    <method name="getOutputCommitter" return="org.apache.hadoop.mapreduce.OutputCommitter"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="BASE_OUTPUT_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="PART" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="COMPRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Configuration option: should output be compressed? {@value}.]]>
+      </doc>
+    </field>
+    <field name="COMPRESS_CODEC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If compression is enabled, name of codec: {@value}.]]>
+      </doc>
+    </field>
+    <field name="COMPRESS_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Type of compression {@value}: NONE, RECORD, BLOCK.
+ Generally only used in {@code SequenceFileOutputFormat}.]]>
+      </doc>
+    </field>
+    <field name="OUTDIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Destination directory of work: {@value}.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A base class for {@link OutputFormat}s that read from {@link FileSystem}s.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.FileOutputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter -->
+  <class name="FileOutputFormatCounter" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.FileOutputFormatCounter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.FilterOutputFormat -->
+  <class name="FilterOutputFormat" extends="org.apache.hadoop.mapreduce.OutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FilterOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FilterOutputFormat" type="org.apache.hadoop.mapreduce.OutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a FilterOutputFormat based on the underlying output format.
+ @param baseOut the underlying OutputFormat]]>
+      </doc>
+    </constructor>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getOutputCommitter" return="org.apache.hadoop.mapreduce.OutputCommitter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <field name="baseOut" type="org.apache.hadoop.mapreduce.OutputFormat"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[FilterOutputFormat is a convenience class that wraps OutputFormat.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.FilterOutputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat -->
+  <class name="LazyOutputFormat" extends="org.apache.hadoop.mapreduce.lib.output.FilterOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="LazyOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setOutputFormatClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the underlying output format for LazyOutputFormat.
+ @param job the {@link Job} to modify
+ @param theClass the underlying class]]>
+      </doc>
+    </method>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getOutputCommitter" return="org.apache.hadoop.mapreduce.OutputCommitter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <field name="OUTPUT_FORMAT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A Convenience class that creates output lazily.
+ Use in conjuction with org.apache.hadoop.mapreduce.lib.output.MultipleOutputs to recreate the
+ behaviour of org.apache.hadoop.mapred.lib.MultipleTextOutputFormat (etc) of the old Hadoop API.
+ See {@link MultipleOutputs} documentation for more information.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat -->
+  <class name="MapFileOutputFormat" extends="org.apache.hadoop.mapreduce.lib.output.FileOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MapFileOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getReaders" return="org.apache.hadoop.io.MapFile.Reader[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="dir" type="org.apache.hadoop.fs.Path"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Open the output generated by this format.]]>
+      </doc>
+    </method>
+    <method name="getEntry" return="org.apache.hadoop.io.Writable"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="readers" type="org.apache.hadoop.io.MapFile.Reader[]"/>
+      <param name="partitioner" type="org.apache.hadoop.mapreduce.Partitioner"/>
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get an entry from output generated by this class.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[An {@link org.apache.hadoop.mapreduce.OutputFormat} that writes 
+ {@link MapFile}s.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.MultipleOutputs -->
+  <class name="MultipleOutputs" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MultipleOutputs" type="org.apache.hadoop.mapreduce.TaskInputOutputContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates and initializes multiple outputs support,
+ it should be instantiated in the Mapper/Reducer setup method.
+
+ @param context the TaskInputOutputContext object]]>
+      </doc>
+    </constructor>
+    <method name="addNamedOutput"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="namedOutput" type="java.lang.String"/>
+      <param name="outputFormatClass" type="java.lang.Class"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valueClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Adds a named output for the job.
+
+ @param job               job to add the named output
+ @param namedOutput       named output name, it has to be a word, letters
+                          and numbers only, cannot be the word 'part' as
+                          that is reserved for the default output.
+ @param outputFormatClass OutputFormat class.
+ @param keyClass          key class
+ @param valueClass        value class]]>
+      </doc>
+    </method>
+    <method name="setCountersEnabled"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="enabled" type="boolean"/>
+      <doc>
+      <![CDATA[Enables or disables counters for the named outputs.
+ 
+ The counters group is the {@link MultipleOutputs} class name.
+ The names of the counters are the same as the named outputs. These
+ counters count the number records written to each output name.
+ By default these counters are disabled.
+
+ @param job    job  to enable counters
+ @param enabled indicates if the counters will be enabled or not.]]>
+      </doc>
+    </method>
+    <method name="getCountersEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Returns if the counters for the named outputs are enabled or not.
+ By default these counters are disabled.
+
+ @param job    the job 
+ @return TRUE if the counters are enabled, FALSE if they are disabled.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="namedOutput" type="java.lang.String"/>
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Write key and value to the namedOutput.
+
+ Output path is a unique file generated for the namedOutput.
+ For example, {namedOutput}-(m|r)-{part-number}
+ 
+ @param namedOutput the named output name
+ @param key         the key
+ @param value       the value]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="namedOutput" type="java.lang.String"/>
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <param name="baseOutputPath" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Write key and value to baseOutputPath using the namedOutput.
+ 
+ @param namedOutput    the named output name
+ @param key            the key
+ @param value          the value
+ @param baseOutputPath base-output path to write the record to.
+ Note: Framework will generate unique filename for the baseOutputPath
+ <b>Warning</b>: when the baseOutputPath is a path that resolves
+ outside of the final job output directory, the directory is created
+ immediately and then persists through subsequent task retries, breaking
+ the concept of output committing.]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="KEYOUT"/>
+      <param name="value" type="VALUEOUT"/>
+      <param name="baseOutputPath" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Write key value to an output file name.
+ 
+ Gets the record writer from job's output format.  
+ Job's output format should be a FileOutputFormat.
+ 
+ @param key       the key
+ @param value     the value
+ @param baseOutputPath base-output path to write the record to.
+ Note: Framework will generate unique filename for the baseOutputPath
+ <b>Warning</b>: when the baseOutputPath is a path that resolves
+ outside of the final job output directory, the directory is created
+ immediately and then persists through subsequent task retries, breaking
+ the concept of output committing.]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Closes all the opened outputs.
+ 
+ This should be called from cleanup method of map/reduce task.
+ If overridden subclasses must invoke <code>super.close()</code> at the
+ end of their <code>close()</code>]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The MultipleOutputs class simplifies writing output data 
+ to multiple outputs
+ 
+ <p> 
+ Case one: writing to additional outputs other than the job default output.
+
+ Each additional output, or named output, may be configured with its own
+ <code>OutputFormat</code>, with its own key class and with its own value
+ class.
+ </p>
+ 
+ <p>
+ Case two: to write data to different files provided by user
+ </p>
+ 
+ <p>
+ MultipleOutputs supports counters, by default they are disabled. The 
+ counters group is the {@link MultipleOutputs} class name. The names of the 
+ counters are the same as the output name. These count the number records 
+ written to each output name.
+ </p>
+ 
+ Usage pattern for job submission:
+ <pre>
+
+ Job job = new Job();
+
+ FileInputFormat.setInputPath(job, inDir);
+ FileOutputFormat.setOutputPath(job, outDir);
+
+ job.setMapperClass(MOMap.class);
+ job.setReducerClass(MOReduce.class);
+ ...
+
+ // Defines additional single text based output 'text' for the job
+ MultipleOutputs.addNamedOutput(job, "text", TextOutputFormat.class,
+ LongWritable.class, Text.class);
+
+ // Defines additional sequence-file based output 'sequence' for the job
+ MultipleOutputs.addNamedOutput(job, "seq",
+   SequenceFileOutputFormat.class,
+   LongWritable.class, Text.class);
+ ...
+
+ job.waitForCompletion(true);
+ ...
+ </pre>
+ <p>
+ Usage in Reducer:
+ <pre>
+ &lt;K, V&gt; String generateFileName(K k, V v) {
+   return k.toString() + "_" + v.toString();
+ }
+ 
+ public class MOReduce extends
+   Reducer&lt;WritableComparable, Writable,WritableComparable, Writable&gt; {
+ private MultipleOutputs mos;
+ public void setup(Context context) {
+ ...
+ mos = new MultipleOutputs(context);
+ }
+
+ public void reduce(WritableComparable key, Iterator&lt;Writable&gt; values,
+ Context context)
+ throws IOException {
+ ...
+ mos.write("text", , key, new Text("Hello"));
+ mos.write("seq", LongWritable(1), new Text("Bye"), "seq_a");
+ mos.write("seq", LongWritable(2), key, new Text("Chau"), "seq_b");
+ mos.write(key, new Text("value"), generateFileName(key, new Text("value")));
+ ...
+ }
+
+ public void cleanup(Context) throws IOException {
+ mos.close();
+ ...
+ }
+
+ }
+ </pre>
+ 
+ <p>
+ When used in conjuction with org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat,
+ MultipleOutputs can mimic the behaviour of MultipleTextOutputFormat and MultipleSequenceFileOutputFormat
+ from the old Hadoop API - ie, output can be written from the Reducer to more than one location.
+ </p>
+ 
+ <p>
+ Use <code>MultipleOutputs.write(KEYOUT key, VALUEOUT value, String baseOutputPath)</code> to write key and 
+ value to a path specified by <code>baseOutputPath</code>, with no need to specify a named output.
+ <b>Warning</b>: when the baseOutputPath passed to MultipleOutputs.write
+ is a path that resolves outside of the final job output directory, the
+ directory is created immediately and then persists through subsequent
+ task retries, breaking the concept of output committing:
+ </p>
+ 
+ <pre>
+ private MultipleOutputs&lt;Text, Text&gt; out;
+ 
+ public void setup(Context context) {
+   out = new MultipleOutputs&lt;Text, Text&gt;(context);
+   ...
+ }
+ 
+ public void reduce(Text key, Iterable&lt;Text&gt; values, Context context) throws IOException, InterruptedException {
+ for (Text t : values) {
+   out.write(key, t, generateFileName(&lt;<i>parameter list...</i>&gt;));
+   }
+ }
+ 
+ protected void cleanup(Context context) throws IOException, InterruptedException {
+   out.close();
+ }
+ </pre>
+ 
+ <p>
+ Use your own code in <code>generateFileName()</code> to create a custom path to your results. 
+ '/' characters in <code>baseOutputPath</code> will be translated into directory levels in your file system. 
+ Also, append your custom-generated path with "part" or similar, otherwise your output will be -00000, -00001 etc. 
+ No call to <code>context.write()</code> is necessary. See example <code>generateFileName()</code> code below. 
+ </p>
+ 
+ <pre>
+ private String generateFileName(Text k) {
+   // expect Text k in format "Surname|Forename"
+   String[] kStr = k.toString().split("\\|");
+   
+   String sName = kStr[0];
+   String fName = kStr[1];
+
+   // example for k = Smith|John
+   // output written to /user/hadoop/path/to/output/Smith/John-r-00000 (etc)
+   return sName + "/" + fName;
+ }
+ </pre>
+ 
+ <p>
+ Using MultipleOutputs in this way will still create zero-sized default output, eg part-00000.
+ To prevent this use <code>LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);</code>
+ instead of <code>job.setOutputFormatClass(TextOutputFormat.class);</code> in your Hadoop job configuration.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.MultipleOutputs -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.NullOutputFormat -->
+  <class name="NullOutputFormat" extends="org.apache.hadoop.mapreduce.OutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NullOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.JobContext"/>
+    </method>
+    <method name="getOutputCommitter" return="org.apache.hadoop.mapreduce.OutputCommitter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+    </method>
+    <doc>
+    <![CDATA[Consume all outputs and put them in /dev/null.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.NullOutputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.PartialFileOutputCommitter -->
+  <class name="PartialFileOutputCommitter" extends="org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.mapreduce.lib.output.PartialOutputCommitter"/>
+    <constructor name="PartialFileOutputCommitter" type="org.apache.hadoop.fs.Path, org.apache.hadoop.mapreduce.TaskAttemptContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <constructor name="PartialFileOutputCommitter" type="org.apache.hadoop.fs.Path, org.apache.hadoop.mapreduce.JobContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getCommittedTaskPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="int"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+    </method>
+    <method name="cleanUpPartialOutputForTask"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[An {@link OutputCommitter} that commits files specified
+ in job output directory i.e. ${mapreduce.output.fileoutputformat.outputdir}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.PartialFileOutputCommitter -->
+  <!-- start interface org.apache.hadoop.mapreduce.lib.output.PartialOutputCommitter -->
+  <interface name="PartialOutputCommitter"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="cleanUpPartialOutputForTask"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Remove all previously committed outputs from prior executions of this task.
+ @param context Context for cleaning up previously promoted output.
+ @throws IOException If cleanup fails, then the state of the task my not be
+                     well defined.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Interface for an {@link org.apache.hadoop.mapreduce.OutputCommitter}
+ implementing partial commit of task output, as during preemption.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.mapreduce.lib.output.PartialOutputCommitter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter -->
+  <class name="PathOutputCommitter" extends="org.apache.hadoop.mapreduce.OutputCommitter"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="PathOutputCommitter" type="org.apache.hadoop.fs.Path, org.apache.hadoop.mapreduce.TaskAttemptContext"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Constructor for a task attempt.
+ Subclasses should provide a public constructor with this signature.
+ @param outputPath output path: may be null
+ @param context task context
+ @throws IOException IO problem]]>
+      </doc>
+    </constructor>
+    <constructor name="PathOutputCommitter" type="org.apache.hadoop.fs.Path, org.apache.hadoop.mapreduce.JobContext"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Constructor for a job attempt.
+ Subclasses should provide a public constructor with this signature.
+ @param outputPath output path: may be null
+ @param context task context
+ @throws IOException IO problem]]>
+      </doc>
+    </constructor>
+    <method name="getOutputPath" return="org.apache.hadoop.fs.Path"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the final directory where work will be placed once the job
+ is committed. This may be null, in which case, there is no output
+ path to write data to.
+ @return the path where final output of the job should be placed.]]>
+      </doc>
+    </method>
+    <method name="hasOutputPath" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Predicate: is there an output path?
+ @return true if we have an output path set, else false.]]>
+      </doc>
+    </method>
+    <method name="getWorkPath" return="org.apache.hadoop.fs.Path"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the directory that the task should write results into.
+ Warning: there's no guarantee that this work path is on the same
+ FS as the final output, or that it's visible across machines.
+ May be null.
+ @return the work directory
+ @throws IOException IO problem]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A committer which somehow commits data written to a working directory
+ to the final directory during the commit process. The reference
+ implementation of this is the {@link FileOutputCommitter}.
+
+ There are two constructors, both of which do nothing but long and
+ validate their arguments.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.SequenceFileAsBinaryOutputFormat -->
+  <class name="SequenceFileAsBinaryOutputFormat" extends="org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileAsBinaryOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setSequenceFileOutputKeyClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the key class for the {@link SequenceFile}
+ <p>This allows the user to specify the key class to be different 
+ from the actual class ({@link BytesWritable}) used for writing </p>
+ 
+ @param job the {@link Job} to modify
+ @param theClass the SequenceFile output key class.]]>
+      </doc>
+    </method>
+    <method name="setSequenceFileOutputValueClass"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="theClass" type="java.lang.Class"/>
+      <doc>
+      <![CDATA[Set the value class for the {@link SequenceFile}
+ <p>This allows the user to specify the value class to be different 
+ from the actual class ({@link BytesWritable}) used for writing </p>
+ 
+ @param job the {@link Job} to modify
+ @param theClass the SequenceFile output key class.]]>
+      </doc>
+    </method>
+    <method name="getSequenceFileOutputKeyClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get the key class for the {@link SequenceFile}
+ 
+ @return the key class of the {@link SequenceFile}]]>
+      </doc>
+    </method>
+    <method name="getSequenceFileOutputValueClass" return="java.lang.Class"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get the value class for the {@link SequenceFile}
+ 
+ @return the value class of the {@link SequenceFile}]]>
+      </doc>
+    </method>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="checkOutputSpecs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <field name="KEY_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="VALUE_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[An {@link org.apache.hadoop.mapreduce.OutputFormat} that writes keys, 
+ values to {@link SequenceFile}s in binary(raw) format]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.SequenceFileAsBinaryOutputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat -->
+  <class name="SequenceFileOutputFormat" extends="org.apache.hadoop.mapreduce.lib.output.FileOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SequenceFileOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getSequenceWriter" return="org.apache.hadoop.io.SequenceFile.Writer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <param name="keyClass" type="java.lang.Class"/>
+      <param name="valueClass" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="getOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get the {@link CompressionType} for the output {@link SequenceFile}.
+ @param job the {@link Job}
+ @return the {@link CompressionType} for the output {@link SequenceFile}, 
+         defaulting to {@link CompressionType#RECORD}]]>
+      </doc>
+    </method>
+    <method name="setOutputCompressionType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/>
+      <doc>
+      <![CDATA[Set the {@link CompressionType} for the output {@link SequenceFile}.
+ @param job the {@link Job} to modify
+ @param style the {@link CompressionType} for the output
+              {@link SequenceFile}]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[An {@link OutputFormat} that writes {@link SequenceFile}s.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat -->
+  <class name="TextOutputFormat" extends="org.apache.hadoop.mapreduce.lib.output.FileOutputFormat"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TextOutputFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecordWriter" return="org.apache.hadoop.mapreduce.RecordWriter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.TaskAttemptContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <field name="SEPARATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SEPERATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #SEPARATOR}">
+      <doc>
+      <![CDATA[@deprecated Use {@link #SEPARATOR}]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[An {@link OutputFormat} that writes plain text files.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.partition">
+  <!-- start class org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner -->
+  <class name="BinaryPartitioner" extends="org.apache.hadoop.mapreduce.Partitioner"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="BinaryPartitioner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setOffsets"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="left" type="int"/>
+      <param name="right" type="int"/>
+      <doc>
+      <![CDATA[Set the subarray to be used for partitioning to 
+ <code>bytes[left:(right+1)]</code> in Python syntax.
+ 
+ @param conf configuration object
+ @param left left Python-style offset
+ @param right right Python-style offset]]>
+      </doc>
+    </method>
+    <method name="setLeftOffset"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="offset" type="int"/>
+      <doc>
+      <![CDATA[Set the subarray to be used for partitioning to 
+ <code>bytes[offset:]</code> in Python syntax.
+ 
+ @param conf configuration object
+ @param offset left Python-style offset]]>
+      </doc>
+    </method>
+    <method name="setRightOffset"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="offset" type="int"/>
+      <doc>
+      <![CDATA[Set the subarray to be used for partitioning to 
+ <code>bytes[:(offset+1)]</code> in Python syntax.
+ 
+ @param conf configuration object
+ @param offset right Python-style offset]]>
+      </doc>
+    </method>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPartition" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.io.BinaryComparable"/>
+      <param name="value" type="V"/>
+      <param name="numPartitions" type="int"/>
+      <doc>
+      <![CDATA[Use (the specified slice of the array returned by) 
+ {@link BinaryComparable#getBytes()} to partition.]]>
+      </doc>
+    </method>
+    <field name="LEFT_OFFSET_PROPERTY_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RIGHT_OFFSET_PROPERTY_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<p>Partition {@link BinaryComparable} keys using a configurable part of 
+ the bytes array returned by {@link BinaryComparable#getBytes()}.</p>
+ 
+ <p>The subarray to be used for the partitioning can be defined by means
+ of the following properties:
+ <ul>
+   <li>
+     <i>mapreduce.partition.binarypartitioner.left.offset</i>:
+     left offset in array (0 by default)
+   </li>
+   <li>
+     <i>mapreduce.partition.binarypartitioner.right.offset</i>: 
+     right offset in array (-1 by default)
+   </li>
+ </ul>
+ Like in Python, both negative and positive offsets are allowed, but
+ the meaning is slightly different. In case of an array of length 5,
+ for instance, the possible offsets are:
+ <pre><code>
+  +---+---+---+---+---+
+  | B | B | B | B | B |
+  +---+---+---+---+---+
+    0   1   2   3   4
+   -5  -4  -3  -2  -1
+ </code></pre>
+ The first row of numbers gives the position of the offsets 0...5 in 
+ the array; the second row gives the corresponding negative offsets. 
+ Contrary to Python, the specified subarray has byte <code>i</code> 
+ and <code>j</code> as first and last element, repectively, when 
+ <code>i</code> and <code>j</code> are the left and right offset.
+ 
+ <p>For Hadoop programs written in Java, it is advisable to use one of 
+ the following static convenience methods for setting the offsets:
+ <ul>
+   <li>{@link #setOffsets}</li>
+   <li>{@link #setLeftOffset}</li>
+   <li>{@link #setRightOffset}</li>
+ </ul>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.partition.BinaryPartitioner -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner -->
+  <class name="HashPartitioner" extends="org.apache.hadoop.mapreduce.Partitioner"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="HashPartitioner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getPartition" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <param name="numReduceTasks" type="int"/>
+      <doc>
+      <![CDATA[Use {@link Object#hashCode()} to partition.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Partition keys by their {@link Object#hashCode()}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.partition.InputSampler -->
+  <class name="InputSampler" extends="org.apache.hadoop.conf.Configured"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.util.Tool"/>
+    <constructor name="InputSampler" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="writePartitionFile"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="sampler" type="org.apache.hadoop.mapreduce.lib.partition.InputSampler.Sampler"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Write a partition file for the given job, using the Sampler provided.
+ Queries the sampler for a sample keyset, sorts by the output key
+ comparator, selects the keys for each rank, and writes to the destination
+ returned from {@link TotalOrderPartitioner#getPartitionFile}.]]>
+      </doc>
+    </method>
+    <method name="run" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[Driver for InputSampler from the command line.
+ Configures a JobConf instance and calls {@link #writePartitionFile}.]]>
+      </doc>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <doc>
+    <![CDATA[Utility for collecting samples and writing a partition file for
+ {@link TotalOrderPartitioner}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.partition.InputSampler -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedComparator -->
+  <class name="KeyFieldBasedComparator" extends="org.apache.hadoop.io.WritableComparator"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="KeyFieldBasedComparator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compare" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="b1" type="byte[]"/>
+      <param name="s1" type="int"/>
+      <param name="l1" type="int"/>
+      <param name="b2" type="byte[]"/>
+      <param name="s2" type="int"/>
+      <param name="l2" type="int"/>
+    </method>
+    <method name="setKeyFieldComparatorOptions"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="keySpec" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the {@link KeyFieldBasedComparator} options used to compare keys.
+ 
+ @param keySpec the key specification of the form -k pos1[,pos2], where,
+  pos is of the form f[.c][opts], where f is the number
+  of the key field to use, and c is the number of the first character from
+  the beginning of the field. Fields and character posns are numbered 
+  starting with 1; a character position of zero in pos2 indicates the
+  field's last character. If '.c' is omitted from pos1, it defaults to 1
+  (the beginning of the field); if omitted from pos2, it defaults to 0 
+  (the end of the field). opts are ordering options. The supported options
+  are:
+    -n, (Sort numerically)
+    -r, (Reverse the result of comparison)]]>
+      </doc>
+    </method>
+    <method name="getKeyFieldComparatorOption" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get the {@link KeyFieldBasedComparator} options]]>
+      </doc>
+    </method>
+    <field name="COMPARATOR_OPTIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This comparator implementation provides a subset of the features provided
+ by the Unix/GNU Sort. In particular, the supported features are:
+ -n, (Sort numerically)
+ -r, (Reverse the result of comparison)
+ -k pos1[,pos2], where pos is of the form f[.c][opts], where f is the number
+  of the field to use, and c is the number of the first character from the
+  beginning of the field. Fields and character posns are numbered starting
+  with 1; a character position of zero in pos2 indicates the field's last
+  character. If '.c' is omitted from pos1, it defaults to 1 (the beginning
+  of the field); if omitted from pos2, it defaults to 0 (the end of the
+  field). opts are ordering options (any of 'nr' as described above). 
+ We assume that the fields in the key are separated by 
+ {@link JobContext#MAP_OUTPUT_KEY_FIELD_SEPARATOR}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedComparator -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedPartitioner -->
+  <class name="KeyFieldBasedPartitioner" extends="org.apache.hadoop.mapreduce.Partitioner"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="KeyFieldBasedPartitioner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPartition" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K2"/>
+      <param name="value" type="V2"/>
+      <param name="numReduceTasks" type="int"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="b" type="byte[]"/>
+      <param name="start" type="int"/>
+      <param name="end" type="int"/>
+      <param name="currentHash" type="int"/>
+    </method>
+    <method name="getPartition" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="hash" type="int"/>
+      <param name="numReduceTasks" type="int"/>
+    </method>
+    <method name="setKeyFieldPartitionerOptions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="keySpec" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the {@link KeyFieldBasedPartitioner} options used for 
+ {@link Partitioner}
+ 
+ @param keySpec the key specification of the form -k pos1[,pos2], where,
+  pos is of the form f[.c][opts], where f is the number
+  of the key field to use, and c is the number of the first character from
+  the beginning of the field. Fields and character posns are numbered 
+  starting with 1; a character position of zero in pos2 indicates the
+  field's last character. If '.c' is omitted from pos1, it defaults to 1
+  (the beginning of the field); if omitted from pos2, it defaults to 0 
+  (the end of the field).]]>
+      </doc>
+    </method>
+    <method name="getKeyFieldPartitionerOption" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.JobContext"/>
+      <doc>
+      <![CDATA[Get the {@link KeyFieldBasedPartitioner} options]]>
+      </doc>
+    </method>
+    <field name="PARTITIONER_OPTIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Defines a way to partition keys based on certain key fields (also see
+  {@link KeyFieldBasedComparator}.
+  The key specification supported is of the form -k pos1[,pos2], where,
+  pos is of the form f[.c][opts], where f is the number
+  of the key field to use, and c is the number of the first character from
+  the beginning of the field. Fields and character posns are numbered 
+  starting with 1; a character position of zero in pos2 indicates the
+  field's last character. If '.c' is omitted from pos1, it defaults to 1
+  (the beginning of the field); if omitted from pos2, it defaults to 0 
+  (the end of the field).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.partition.KeyFieldBasedPartitioner -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.partition.RehashPartitioner -->
+  <class name="RehashPartitioner" extends="org.apache.hadoop.mapreduce.Partitioner"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RehashPartitioner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getPartition" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <param name="numReduceTasks" type="int"/>
+      <doc>
+      <![CDATA[Rehash {@link Object#hashCode()} to partition.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This partitioner rehashes values returned by {@link Object#hashCode()}
+  to get smoother distribution between partitions which may improve
+  reduce reduce time in some cases and should harm things in no cases.
+  This partitioner is suggested with Integer and Long keys with simple
+  patterns in their distributions.
+  @since 2.0.3]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.partition.RehashPartitioner -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner -->
+  <class name="TotalOrderPartitioner" extends="org.apache.hadoop.mapreduce.Partitioner"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.conf.Configurable"/>
+    <constructor name="TotalOrderPartitioner"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="setConf"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Read in the partition file and build indexing data structures.
+ If the keytype is {@link org.apache.hadoop.io.BinaryComparable} and
+ <tt>total.order.partitioner.natural.order</tt> is not false, a trie
+ of the first <tt>total.order.partitioner.max.trie.depth</tt>(2) + 1 bytes
+ will be built. Otherwise, keys will be located using a binary search of
+ the partition keyset using the {@link org.apache.hadoop.io.RawComparator}
+ defined for this job. The input file must be sorted with the same
+ comparator and contain {@link Job#getNumReduceTasks()} - 1 keys.]]>
+      </doc>
+    </method>
+    <method name="getConf" return="org.apache.hadoop.conf.Configuration"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPartition" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="K"/>
+      <param name="value" type="V"/>
+      <param name="numPartitions" type="int"/>
+    </method>
+    <method name="setPartitionFile"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="p" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Set the path to the SequenceFile storing the sorted partition keyset.
+ It must be the case that for <tt>R</tt> reduces, there are <tt>R-1</tt>
+ keys in the SequenceFile.]]>
+      </doc>
+    </method>
+    <method name="getPartitionFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the path to the SequenceFile storing the sorted partition keyset.
+ @see #setPartitionFile(Configuration, Path)]]>
+      </doc>
+    </method>
+    <field name="DEFAULT_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="PARTITIONER_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAX_TRIE_DEPTH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NATURAL_ORDER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Partitioner effecting a total order by reading split points from
+ an externally generated source.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner -->
+</package>
+<package name="org.apache.hadoop.mapreduce.lib.reduce">
+  <!-- start class org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer -->
+  <class name="IntSumReducer" extends="org.apache.hadoop.mapreduce.Reducer"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="IntSumReducer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="Key"/>
+      <param name="values" type="java.lang.Iterable"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer -->
+  <class name="LongSumReducer" extends="org.apache.hadoop.mapreduce.Reducer"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="LongSumReducer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="reduce"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="KEY"/>
+      <param name="values" type="java.lang.Iterable"/>
+      <param name="context" type="org.apache.hadoop.mapreduce.Reducer.Context"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer -->
+  <!-- start class org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer -->
+  <class name="WrappedReducer" extends="org.apache.hadoop.mapreduce.Reducer"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="WrappedReducer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getReducerContext" return="org.apache.hadoop.mapreduce.Reducer.Context"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reduceContext" type="org.apache.hadoop.mapreduce.ReduceContext"/>
+      <doc>
+      <![CDATA[A a wrapped {@link Reducer.Context} for custom implementations.
+ @param reduceContext <code>ReduceContext</code> to be wrapped
+ @return a wrapped <code>Reducer.Context</code> for custom implementations]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A {@link Reducer} which wraps a given one to allow for custom 
+ {@link Reducer.Context} implementations.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer -->
+</package>
+<package name="org.apache.hadoop.mapreduce.server.jobtracker">
+</package>
+<package name="org.apache.hadoop.mapreduce.server.tasktracker">
+</package>
+<package name="org.apache.hadoop.mapreduce.task.annotation">
+  <!-- start class org.apache.hadoop.mapreduce.task.annotation.Checkpointable -->
+  <class name="Checkpointable"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.annotation.Annotation"/>
+    <doc>
+    <![CDATA[Contract representing to the framework that the task can be safely preempted
+ and restarted between invocations of the user-defined function.
+
+ This is often true when the result of a function does not rely on state
+ derived from previous elements in the record stream, but the guarantee is
+ left as an exercise to the implementor.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.task.annotation.Checkpointable -->
+</package>
+<package name="org.apache.hadoop.mapreduce.tools">
+  <!-- start class org.apache.hadoop.mapreduce.tools.CLI -->
+  <class name="CLI" extends="org.apache.hadoop.conf.Configured"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.util.Tool"/>
+    <constructor name="CLI"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="CLI" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="run" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="argv" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="getCounter" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="counters" type="org.apache.hadoop.mapreduce.Counters"/>
+      <param name="counterGroupName" type="java.lang.String"/>
+      <param name="counterName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getTaskLogURL" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="taskId" type="org.apache.hadoop.mapreduce.TaskAttemptID"/>
+      <param name="baseUrl" type="java.lang.String"/>
+    </method>
+    <method name="displayTasks"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="job" type="org.apache.hadoop.mapreduce.Job"/>
+      <param name="type" type="java.lang.String"/>
+      <param name="state" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Display the information about a job's tasks, of a particular type and
+ in a particular state
+ 
+ @param job the job
+ @param type the type of the task (map/reduce/setup/cleanup)
+ @param state the state of the task 
+ (pending/running/completed/failed/killed)
+ @throws IOException when there is an error communicating with the master
+ @throws InterruptedException
+ @throws IllegalArgumentException if an invalid type/state is passed]]>
+      </doc>
+    </method>
+    <method name="displayJobList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="jobs" type="org.apache.hadoop.mapreduce.JobStatus[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="argv" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <field name="cluster" type="org.apache.hadoop.mapreduce.Cluster"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="headerPattern" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="dataPattern" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Interprets the map reduce cli options]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.mapreduce.tools.CLI -->
+</package>
+<package name="org.apache.hadoop.mapreduce.v2">
+</package>
+
+</api>
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.3.4.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.3.4.xml
new file mode 100644
index 0000000..dd96ac9
--- /dev/null
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.3.4.xml
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Fri Jul 29 14:16:00 GMT 2022 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop MapReduce JobClient 3.3.4"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/hadoop-annotations.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/jdiff.jar -verbose -classpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/classes:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/target/hadoop-mapreduce-client-common-3.3.4.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-3.3.4.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-yarn-client-3.3.4.jar:/maven/org/eclipse/jetty/websocket/websocket-client/9.4.43.v20210629/websocket-client-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-client/9.4.43.v20210629/jetty-client-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/websocket/websocket-common/9.4.43.v20210629/websocket-common-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/websocket/websocket-api/9.4.43.v20210629/websocket-api-9.4.43.v20210629.jar:/maven/org/jline/jline/3.9.0/jline-3.9.0.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/target/hadoop-mapreduce-client-core-3.3.4.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.12.7/jackson-databind-2.12.7.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-3.3.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.3.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/9.8.1/nimbus-jose-jwt-9.8.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/2.4.7/json-smart-2.4.7.jar:/maven/net/minidev/accessors-smart/2.4.7/accessors-smart-2.4.7.jar:/maven/org/ow2/asm/asm/5.0.4/asm-5.0
.4.jar:/maven/org/apache/curator/curator-framework/4.2.0/curator-framework-4.2.0.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.1/kerb-simplekdc-1.0.1.jar:/maven/org/apache/kerby/kerb-client/1.0.1/kerb-client-1.0.1.jar:/maven/org/apache/kerby/kerby-config/1.0.1/kerby-config-1.0.1.jar:/maven/org/apache/kerby/kerb-common/1.0.1/kerb-common-1.0.1.jar:/maven/org/apache/kerby/kerb-crypto/1.0.1/kerb-crypto-1.0.1.jar:/maven/org/apache/kerby/kerb-util/1.0.1/kerb-util-1.0.1.jar:/maven/org/apache/kerby/token-provider/1.0.1/token-provider-1.0.1.jar:/maven/org/apache/kerby/kerb-admin/1.0.1/kerb-admin-1.0.1.jar:/maven/org/apache/kerby/kerb-server/1.0.1/kerb-server-1.0.1.jar:/maven/org/apache/kerby/kerb-identity/1.0.1/kerb-identity-1.0.1.jar:/maven/org/apache/kerby/kerby-xdr/1.0.1/kerby-xdr-1.0.1.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/org/apache/commons/commons-compress/1.21/commons-compress-1.21.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/org/eclipse/jetty/jetty-util/9.4.43.v20210629/jetty-util-9.4.43.v20210629.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/com/sun/jersey/jersey-client/1.19/jersey-client-1.19.jar:/maven/commons-io/commons-io/2.8.0/commons-io-2.8.0.jar:/maven/com/google/inject/guice/4.0/guice-4.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.19/jersey-guice-1.19.jar:/maven/ch/qos/reload4j/reload4j/1.2.22/reload4j-1.2.22.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.12.7/jackson-core-2.12.7.jar:/maven/com/fasterxml/jackson/module/j
ackson-module-jaxb-annotations/2.12.7/jackson-module-jaxb-annotations-2.12.7.jar:/maven/jakarta/xml/bind/jakarta.xml.bind-api/2.3.2/jakarta.xml.bind-api-2.3.2.jar:/maven/com/fasterxml/jackson/jaxrs/jackson-jaxrs-json-provider/2.12.7/jackson-jaxrs-json-provider-2.12.7.jar:/maven/com/fasterxml/jackson/jaxrs/jackson-jaxrs-base/2.12.7/jackson-jaxrs-base-2.12.7.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/org/eclipse/jetty/jetty-util-ajax/9.4.43.v20210629/jetty-util-ajax-9.4.43.v20210629.jar:/maven/org/apache/curator/curator-client/4.2.0/curator-client-4.2.0.jar:/maven/org/apache/zookeeper/zookeeper/3.5.6/zookeeper-3.5.6.jar:/maven/org/apache/zookeeper/zookeeper-jute/3.5.6/zookeeper-jute-3.5.6.jar:/maven/org/apache/yetus/audience-annotations/0.5.0/audience-annotations-0.5.0.jar:/maven/org/eclipse/jetty/jetty-server/9.4.43.v20210629/jetty-server-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-http/9.4.43.v20210629/jetty-http-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-io/9.4.43.v20210629/jetty-io-9.4.43.v20210629.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.1.8.2/snappy-java-1.1.8.2.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.3.4.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-protobuf_3_7/1.1.1/hadoop-shaded-protobuf_3_7-1.1.1.jar:/maven/com/google/guava/guava/27.0-jre/guava-27.0-jre.jar:/maven/com/google/guava/failureaccess/1.0/failureaccess-1.0.jar:/maven/com/google/guava/listenablefuture/9999.0-empty-to-avoid-conflict-with-guava/listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar:/mave
n/org/checkerframework/checker-qual/2.5.2/checker-qual-2.5.2.jar:/maven/com/google/j2objc/j2objc-annotations/1.1/j2objc-annotations-1.1.jar:/maven/org/codehaus/mojo/animal-sniffer-annotations/1.17/animal-sniffer-annotations-1.17.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/org/apache/httpcomponents/httpclient/4.5.13/httpclient-4.5.13.jar:/maven/org/apache/httpcomponents/httpcore/4.4.13/httpcore-4.4.13.jar:/maven/commons-net/commons-net/3.6/commons-net-3.6.jar:/maven/jakarta/activation/jakarta.activation-api/1.2.1/jakarta.activation-api-1.2.1.jar:/maven/org/eclipse/jetty/jetty-servlet/9.4.43.v20210629/jetty-servlet-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-security/9.4.43.v20210629/jetty-security-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-webapp/9.4.43.v20210629/jetty-webapp-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-xml/9.4.43.v20210629/jetty-xml-9.4.43.v20210629.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/commons-beanutils/commons-beanutils/1.9.4/commons-beanutils-1.9.4.jar:/maven/org/apache/commons/commons-configuration2/2.1.1/commons-configuration2-2.1.1.jar:/maven/org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar:/maven/org/apache/commons/commons-text/1.4/commons-text-1.4.jar:/maven/com/google/re2j/re2j/1.1/re2j-1.1.jar:/maven/com/google/code/gson/gson/2.8.9/gson-2.8.9.jar:/maven/com/jcraft/jsch/0.1.55/jsch-0.1.55.jar:/maven/org/apache/curator/curator-recipes/4.2.0/curator-recipes-4.2.0.jar:/maven/com/google/code/findbugs/jsr305/3.0.2/jsr305-3.0.2.jar:/maven/org/apache/kerby/kerb-core/1.0.1/kerb-core-1.0.1.jar:/maven/org/apache/kerby/kerby-pkix/1.0.1/kerby-pkix-1.0.1.jar:/maven/org/apache/kerby/kerby-asn1/1.0.1/kerby-asn1-1.0.1.jar:/maven/org/apache/kerby/kerby-util/1.0.1/kerby-util-1.0.1.jar:/maven/org/codehaus/woodstox/stax2-api/4.2.1/stax2-api-4.2.1.jar:/maven/com/fasterxml/woodstox/woodstox-core/
5.3.0/woodstox-core-5.3.0.jar:/maven/dnsjava/dnsjava/2.1.7/dnsjava-2.1.7.jar:/maven/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar:/maven/org/slf4j/slf4j-reload4j/1.7.36/slf4j-reload4j-1.7.36.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.3.4.jar:/usr/lib/jvm/java-8-openjdk-amd64/lib/tools.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.12.7/jackson-annotations-2.12.7.jar:/maven/com/google/inject/extensions/guice-servlet/4.0/guice-servlet-4.0.jar:/maven/io/netty/netty/3.10.6.Final/netty-3.10.6.Final.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-guava/1.1.1/hadoop-shaded-guava-1.1.1.jar:/maven/commons-codec/commons-codec/1.15/commons-codec-1.15.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/xerces/xercesImpl/2.12.2/xercesImpl-2.12.2.jar:/maven/xml-apis/xml-apis/1.4.01/xml-apis-1.4.01.jar -sourcepath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/hadoop-annotations.jar:/build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/jdiff.jar -apidir /build/source/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/target/site/jdiff/xml -apiname Apache Hadoop MapReduce JobClient 3.3.4 -->
+<package name="org.apache.hadoop.mapred">
+</package>
+
+</api>
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 9af619a..53e2de6 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -134,7 +134,7 @@
         <activeByDefault>false</activeByDefault>
       </activation>
       <properties>
-        <jdiff.stable.api>3.3.3</jdiff.stable.api>
+        <jdiff.stable.api>3.3.4</jdiff.stable.api>
         <jdiff.stability>-unstable</jdiff.stability>
         <!-- Commented out for HADOOP-11776 -->
         <!-- Uncomment param name="${jdiff.compatibility}" in javadoc doclet if compatibility is not empty -->
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_3.3.4.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_3.3.4.xml
new file mode 100644
index 0000000..f96c8d5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_API_3.3.4.xml
@@ -0,0 +1,26407 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Fri Jul 29 14:08:01 GMT 2022 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop YARN API 3.3.4"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/jdiff.jar -verbose -classpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/classes:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-guava/1.1.1/hadoop-shaded-guava-1.1.1.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.3.4.jar:/maven/com/google/guava/guava/27.0-jre/guava-27.0-jre.jar:/maven/com/google/guava/failureaccess/1.0/failureaccess-1.0.jar:/maven/com/google/guava/listenablefuture/9999.0-empty-to-avoid-conflict-with-guava/listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar:/maven/org/checkerframework/checker-qual/2.5.2/checker-qual-2.5.2.jar:/maven/com/google/j2objc/j2objc-annotations/1.1/j2objc-annotations-1.1.jar:/maven/org/codehaus/mojo/animal-sniffer-annotations/1.17/animal-sniffer-annotations-1.17.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/org/apache/httpcomponents/httpclient/4.5.13/httpclient-4.5.13.jar:/maven/org/apache/httpcomponents/httpcore/4.4.13/httpcore-4.4.13.jar:/maven/commons-codec/commons-codec/1.15/commons-codec-1.15.jar:/maven/commons-io/commons-io/2.8.0/commons-io-2.8.0.jar:/maven/commons-net/commons-net/3.6/commons-net-3.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/jakarta/activation/jakarta.activation-api/1.2.1/jakarta.activation-api-1.2.1.jar:/maven/org/eclipse/jetty/jetty-server/9.4.43.v20210629/jetty-server-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-http/9.4.43.v20210629/jetty-http-9.4.43.v20210629.jar:/maven/o
rg/eclipse/jetty/jetty-io/9.4.43.v20210629/jetty-io-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-util/9.4.43.v20210629/jetty-util-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-servlet/9.4.43.v20210629/jetty-servlet-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-security/9.4.43.v20210629/jetty-security-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-util-ajax/9.4.43.v20210629/jetty-util-ajax-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-webapp/9.4.43.v20210629/jetty-webapp-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-xml/9.4.43.v20210629/jetty-xml-9.4.43.v20210629.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/ch/qos/reload4j/reload4j/1.2.22/reload4j-1.2.22.jar:/maven/commons-beanutils/commons-beanutils/1.9.4/commons-beanutils-1.9.4.jar:/maven/org/apache/commons/commons-configuration2/2.1.1/commons-configuration2-2.1.1.jar:/maven/org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar:/maven/org/apache/commons/commons-text/1.4/commons-text-1.4.jar:/maven/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/
maven/com/google/re2j/re2j/1.1/re2j-1.1.jar:/maven/com/google/code/gson/gson/2.8.9/gson-2.8.9.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.3.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/9.8.1/nimbus-jose-jwt-9.8.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/2.4.7/json-smart-2.4.7.jar:/maven/net/minidev/accessors-smart/2.4.7/accessors-smart-2.4.7.jar:/maven/org/ow2/asm/asm/5.0.4/asm-5.0.4.jar:/maven/org/apache/curator/curator-framework/4.2.0/curator-framework-4.2.0.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.1/kerb-simplekdc-1.0.1.jar:/maven/org/apache/kerby/kerb-client/1.0.1/kerb-client-1.0.1.jar:/maven/org/apache/kerby/kerby-config/1.0.1/kerby-config-1.0.1.jar:/maven/org/apache/kerby/kerb-common/1.0.1/kerb-common-1.0.1.jar:/maven/org/apache/kerby/kerb-crypto/1.0.1/kerb-crypto-1.0.1.jar:/maven/org/apache/kerby/kerb-util/1.0.1/kerb-util-1.0.1.jar:/maven/org/apache/kerby/token-provider/1.0.1/token-provider-1.0.1.jar:/maven/org/apache/kerby/kerb-admin/1.0.1/kerb-admin-1.0.1.jar:/maven/org/apache/kerby/kerb-server/1.0.1/kerb-server-1.0.1.jar:/maven/org/apache/kerby/kerb-identity/1.0.1/kerb-identity-1.0.1.jar:/maven/org/apache/kerby/kerby-xdr/1.0.1/kerby-xdr-1.0.1.jar:/maven/com/jcraft/jsch/0.1.55/jsch-0.1.55.jar:/maven/org/apache/curator/curator-client/4.2.0/curator-client-4.2.0.jar:/maven/org/apache/curator/curator-recipes/4.2.0/curator-recipes-4.2.0.jar:/maven/com/google/code/findbugs/jsr305/3.0.2/jsr305-3.0.2.jar:/maven/org/apache/zookeeper/zookeeper/3.5.6/zookeeper-3.5.6.jar:/maven/org/apache/zookeeper/zookeeper-jute/3.5.6/zookeeper-jute-3.5.6.jar:/maven/org/apache/yetus/audience-annotations/0.5.0/audience-annotations-0.5.0.jar:/maven/org/apache/commons/commons-compress/1.21/commons-compress-1.21.jar:/maven/org/apache/kerby/kerb-core/1.0.1/kerb-core-1.0.1.jar:/maven/org/apache/kerby/kerby-pkix/1.0.1/kerby-pkix-1.0.1.jar:/maven/org/apache/kerby/kerby-asn1/1.0.1/kerby-a
sn1-1.0.1.jar:/maven/org/apache/kerby/kerby-util/1.0.1/kerby-util-1.0.1.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.12.7/jackson-databind-2.12.7.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.12.7/jackson-core-2.12.7.jar:/maven/org/codehaus/woodstox/stax2-api/4.2.1/stax2-api-4.2.1.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.3.0/woodstox-core-5.3.0.jar:/maven/dnsjava/dnsjava/2.1.7/dnsjava-2.1.7.jar:/maven/org/xerial/snappy/snappy-java/1.1.8.2/snappy-java-1.1.8.2.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.3.4.jar:/usr/lib/jvm/java-8-openjdk-amd64/lib/tools.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-protobuf_3_7/1.1.1/hadoop-shaded-protobuf_3_7-1.1.1.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.12.7/jackson-annotations-2.12.7.jar:/maven/xerces/xercesImpl/2.12.2/xercesImpl-2.12.2.jar:/maven/xml-apis/xml-apis/1.4.01/xml-apis-1.4.01.jar -sourcepath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/jdiff.jar -apidir /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/site/jdiff/xml -apiname Apache Hadoop YARN API 3.3.4 -->
+<package name="org.apache.hadoop.yarn.ams">
+</package>
+<package name="org.apache.hadoop.yarn.api">
+  <!-- start interface org.apache.hadoop.yarn.api.ApplicationClientProtocol -->
+  <interface name="ApplicationClientProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"/>
+    <method name="getNewApplication" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to obtain a new {@link ApplicationId} for 
+ submitting new applications.</p>
+ 
+ <p>The <code>ResourceManager</code> responds with a new, monotonically
+ increasing, {@link ApplicationId} which is used by the client to submit
+ a new application.</p>
+
+ <p>The <code>ResourceManager</code> also responds with details such 
+ as maximum resource capabilities in the cluster as specified in
+ {@link GetNewApplicationResponse}.</p>
+
+ @param request request to get a new <code>ApplicationId</code>
+ @return response containing the new <code>ApplicationId</code> to be used
+ to submit an application
+ @throws YarnException
+ @throws IOException
+ @see #submitApplication(SubmitApplicationRequest)]]>
+      </doc>
+    </method>
+    <method name="submitApplication" return="org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to submit a new application to the
+ <code>ResourceManager.</code></p>
+ 
+ <p>The client is required to provide details such as queue, 
+ {@link Resource} required to run the <code>ApplicationMaster</code>, 
+ the equivalent of {@link ContainerLaunchContext} for launching
+ the <code>ApplicationMaster</code> etc. via the 
+ {@link SubmitApplicationRequest}.</p>
+ 
+ <p>Currently the <code>ResourceManager</code> sends an immediate (empty) 
+ {@link SubmitApplicationResponse} on accepting the submission and throws 
+ an exception if it rejects the submission. However, this call needs to be
+ followed by {@link #getApplicationReport(GetApplicationReportRequest)}
+ to make sure that the application gets properly submitted - obtaining a
+ {@link SubmitApplicationResponse} from ResourceManager doesn't guarantee
+ that RM 'remembers' this application beyond failover or restart. If RM
+ failover or RM restart happens before ResourceManager saves the
+ application's state successfully, the subsequent
+ {@link #getApplicationReport(GetApplicationReportRequest)} will throw
+ a {@link ApplicationNotFoundException}. The Clients need to re-submit
+ the application with the same {@link ApplicationSubmissionContext} when
+ it encounters the {@link ApplicationNotFoundException} on the
+ {@link #getApplicationReport(GetApplicationReportRequest)} call.</p>
+ 
+ <p>During the submission process, it checks whether the application
+ already exists. If the application exists, it will simply return
+ SubmitApplicationResponse</p>
+
+ <p> In secure mode,the <code>ResourceManager</code> verifies access to
+ queues etc. before accepting the application submission.</p>
+ 
+ @param request request to submit a new application
+ @return (empty) response on accepting the submission
+ @throws YarnException
+ @throws IOException
+ @see #getNewApplication(GetNewApplicationRequest)]]>
+      </doc>
+    </method>
+    <method name="failApplicationAttempt" return="org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to request the 
+ <code>ResourceManager</code> to fail an application attempt.</p>
+
+ <p>The client, via {@link FailApplicationAttemptRequest} provides the
+ {@link ApplicationAttemptId} of the attempt to be failed.</p>
+
+ <p> In secure mode,the <code>ResourceManager</code> verifies access to the
+ application, queue etc. before failing the attempt.</p>
+
+ <p>Currently, the <code>ResourceManager</code> returns an empty response
+ on success and throws an exception on rejecting the request.</p>
+
+ @param request request to fail an attempt
+ @return <code>ResourceManager</code> returns an empty response
+         on success and throws an exception on rejecting the request
+ @throws YarnException
+ @throws IOException
+ @see #getQueueUserAcls(GetQueueUserAclsInfoRequest)]]>
+      </doc>
+    </method>
+    <method name="forceKillApplication" return="org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to request the
+ <code>ResourceManager</code> to abort submitted application.</p>
+ 
+ <p>The client, via {@link KillApplicationRequest} provides the
+ {@link ApplicationId} of the application to be aborted.</p>
+ 
+ <p> In secure mode,the <code>ResourceManager</code> verifies access to the
+ application, queue etc. before terminating the application.</p> 
+ 
+ <p>Currently, the <code>ResourceManager</code> returns an empty response
+ on success and throws an exception on rejecting the request.</p>
+ 
+ @param request request to abort a submitted application
+ @return <code>ResourceManager</code> returns an empty response
+         on success and throws an exception on rejecting the request
+ @throws YarnException
+ @throws IOException
+ @see #getQueueUserAcls(GetQueueUserAclsInfoRequest)]]>
+      </doc>
+    </method>
+    <method name="getClusterMetrics" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get metrics about the cluster from
+ the <code>ResourceManager</code>.</p>
+ 
+ <p>The <code>ResourceManager</code> responds with a
+ {@link GetClusterMetricsResponse} which includes the 
+ {@link YarnClusterMetrics} with details such as number of current
+ nodes in the cluster.</p>
+ 
+ @param request request for cluster metrics
+ @return cluster metrics
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClusterNodes" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get a report of all nodes
+ in the cluster from the <code>ResourceManager</code>.</p>
+ 
+ <p>The <code>ResourceManager</code> responds with a 
+ {@link GetClusterNodesResponse} which includes the 
+ {@link NodeReport} for all the nodes in the cluster.</p>
+ 
+ @param request request for report on all nodes
+ @return report on all nodes
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueInfo" return="org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get information about <em>queues</em>
+ from the <code>ResourceManager</code>.</p>
+ 
+ <p>The client, via {@link GetQueueInfoRequest}, can ask for details such
+ as used/total resources, child queues, running applications etc.</p>
+
+ <p> In secure mode,the <code>ResourceManager</code> verifies access before
+ providing the information.</p> 
+ 
+ @param request request to get queue information
+ @return queue information
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueUserAcls" return="org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to get information about <em>queue 
+ acls</em> for <em>current user</em> from the <code>ResourceManager</code>.
+ </p>
+ 
+ <p>The <code>ResourceManager</code> responds with queue acls for all
+ existing queues.</p>
+ 
+ @param request request to get queue acls for <em>current user</em>
+ @return queue acls for <em>current user</em>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="moveApplicationAcrossQueues" return="org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Move an application to a new queue.
+ 
+ @param request the application ID and the target queue
+ @return an empty response
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getNewReservation" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to obtain a new {@link ReservationId} for
+ submitting new reservations.</p>
+
+ <p>The <code>ResourceManager</code> responds with a new, unique,
+ {@link ReservationId} which is used by the client to submit
+ a new reservation.</p>
+
+ @param request to get a new <code>ReservationId</code>
+ @return response containing the new <code>ReservationId</code> to be used
+ to submit a new reservation
+ @throws YarnException if the reservation system is not enabled.
+ @throws IOException on IO failures.
+ @see #submitReservation(ReservationSubmissionRequest)]]>
+      </doc>
+    </method>
+    <method name="submitReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to submit a new reservation to the
+ {@code ResourceManager}.
+ </p>
+ 
+ <p>
+ The client packages all details of its request in a
+ {@link ReservationSubmissionRequest} object. This contains information
+ about the amount of capacity, temporal constraints, and concurrency needs.
+ Furthermore, the reservation might be composed of multiple stages, with
+ ordering dependencies among them.
+ </p>
+ 
+ <p>
+ In order to respond, a new admission control component in the
+ {@code ResourceManager} performs an analysis of the resources that have
+ been committed over the period of time the user is requesting, verifies that
+ the user requests can be fulfilled, and that they respect a sharing policy
+ (e.g., {@code CapacityOverTimePolicy}). Once it has positively determined
+ that the ReservationSubmissionRequest is satisfiable the
+ {@code ResourceManager} answers with a
+ {@link ReservationSubmissionResponse} that include a non-null
+ {@link ReservationId}. Upon failure to find a valid allocation the response
+ is an exception with the reason.
+ 
+ On application submission the client can use this {@link ReservationId} to
+ obtain access to the reserved resources.
+ </p>
+ 
+ <p>
+ The system guarantees that during the time-range specified by the user, the
+ reservationID will be corresponding to a valid reservation. The amount of
+ capacity dedicated to such queue can vary over time, depending on the
+ allocation that has been determined. But it is guaranteed to satisfy all
+ the constraint expressed by the user in the
+ {@link ReservationSubmissionRequest}.
+ </p>
+ 
+ @param request the request to submit a new Reservation
+ @return response the {@link ReservationId} on accepting the submission
+ @throws YarnException if the request is invalid or reservation cannot be
+           created successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="updateReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to update an existing Reservation. This is
+ referred to as a re-negotiation process, in which a user that has
+ previously submitted a Reservation can modify its allocation.
+ </p>
+ 
+ <p>
+ The allocation is attempted by virtually substituting all previous
+ allocations related to this Reservation with new ones, that satisfy the new
+ {@link ReservationUpdateRequest}. Upon success the previous allocation is
+ substituted by the new one, and on failure (i.e., if the system cannot find
+ a valid allocation for the updated request), the previous allocation
+ remains valid.
+ 
+ The {@link ReservationId} is not changed, and applications currently
+ running within this reservation will automatically receive the resources
+ based on the new allocation.
+ </p>
+ 
+ @param request to update an existing Reservation (the ReservationRequest
+          should refer to an existing valid {@link ReservationId})
+ @return response empty on successfully updating the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           updated successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="deleteReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to remove an existing Reservation.
+ 
+ Upon deletion of a reservation applications running with this reservation,
+ are automatically downgraded to normal jobs running without any dedicated
+ reservation.
+ </p>
+ 
+ @param request to remove an existing Reservation (the ReservationRequest
+          should refer to an existing valid {@link ReservationId})
+ @return response empty on successfully deleting the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           deleted successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="listReservations" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get the list of reservations in a plan.
+ The reservationId will be used to search for reservations to list if it is
+ provided. Otherwise, it will select active reservations within the
+ startTime and endTime (inclusive).
+ </p>
+
+ @param request to list reservations in a plan. Contains fields to select
+                String queue, ReservationId reservationId, long startTime,
+                long endTime, and a bool includeReservationAllocations.
+
+                queue: Required. Cannot be null or empty. Refers to the
+                reservable queue in the scheduler that was selected when
+                creating a reservation submission
+                {@link ReservationSubmissionRequest}.
+
+                reservationId: Optional. If provided, other fields will
+                be ignored.
+
+                startTime: Optional. If provided, only reservations that
+                end after the startTime will be selected. This defaults
+                to 0 if an invalid number is used.
+
+                endTime: Optional. If provided, only reservations that
+                start on or before endTime will be selected. This defaults
+                to Long.MAX_VALUE if an invalid number is used.
+
+                includeReservationAllocations: Optional. Flag that
+                determines whether the entire reservation allocations are
+                to be returned. Reservation allocations are subject to
+                change in the event of re-planning as described by
+                {@code ReservationDefinition}.
+
+ @return response that contains information about reservations that are
+                being searched for.
+ @throws YarnException if the request is invalid
+ @throws IOException on IO failures]]>
+      </doc>
+    </method>
+    <method name="getNodeToLabels" return="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get node to labels mappings in existing cluster
+ </p>
+
+ @param request
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getLabelsToNodes" return="org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetLabelsToNodesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get labels to nodes mappings
+ in existing cluster
+ </p>
+
+ @param request
+ @return labels to nodes mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeLabels" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get node labels in the cluster
+ </p>
+
+ @param request to get node labels collection of this cluster
+ @return node labels collection of this cluster
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="updateApplicationPriority" return="org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to set priority of an application.
+ </p>
+ @param request to set priority of an application
+ @return an empty response
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="signalToContainer" return="org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by clients to request the
+ <code>ResourceManager</code> to signal a container. For example,
+ the client can send command OUTPUT_THREAD_DUMP to dump threads of the
+ container.</p>
+
+ <p>The client, via {@link SignalContainerRequest} provides the
+ id of the container and the signal command. </p>
+
+ <p> In secure mode,the <code>ResourceManager</code> verifies access to the
+ application before signaling the container.
+ The user needs to have <code>MODIFY_APP</code> permission.</p>
+
+ <p>Currently, the <code>ResourceManager</code> returns an empty response
+ on success and throws an exception on rejecting the request.</p>
+
+ @param request request to signal a container
+ @return <code>ResourceManager</code> returns an empty response
+         on success and throws an exception on rejecting the request
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="updateApplicationTimeouts" return="org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to set ApplicationTimeouts of an application.
+ The UpdateApplicationTimeoutsRequest should have timeout value with
+ absolute time with ISO8601 format <b>yyyy-MM-dd'T'HH:mm:ss.SSSZ</b>.
+ </p>
+ <b>Note:</b> If application timeout value is less than or equal to current
+ time then update application throws YarnException.
+ @param request to set ApplicationTimeouts of an application
+ @return a response with updated timeouts.
+ @throws YarnException if update request has empty values or application is
+           in completing states.
+ @throws IOException on IO failures]]>
+      </doc>
+    </method>
+    <method name="getResourceProfiles" return="org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get all the resource profiles that are
+ available on the ResourceManager.
+ </p>
+ @param request request to get all the resource profiles
+ @return Response containing a map of the profile name to Resource
+         capabilities
+ @throws YARNFeatureNotEnabledException if resource-profile is disabled
+ @throws YarnException if any error happens inside YARN
+ @throws IOException in case of other errors]]>
+      </doc>
+    </method>
+    <method name="getResourceProfile" return="org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface to get the details for a specific resource profile.
+ </p>
+ @param request request to get the details of a resource profile
+ @return Response containing the details for a particular resource profile
+ @throws YARNFeatureNotEnabledException if resource-profile is disabled
+ @throws YarnException if any error happens inside YARN
+ @throws IOException in case of other errors]]>
+      </doc>
+    </method>
+    <method name="getResourceTypeInfo" return="org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface to get the details for a specific resource profile.
+ </p>
+ @param request request to get the details of a resource profile
+ @return Response containing the details for a particular resource profile
+ @throws YarnException if any error happens inside YARN
+ @throws IOException in case of other errors]]>
+      </doc>
+    </method>
+    <method name="getAttributesToNodes" return="org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get attributes to nodes mappings
+ available in ResourceManager.
+ </p>
+
+ @param request request to get details of attributes to nodes mapping.
+ @return Response containing the details of attributes to nodes mappings.
+ @throws YarnException if any error happens inside YARN
+ @throws IOException   in case of other errors]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeAttributes" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get node attributes available in
+ ResourceManager.
+ </p>
+
+ @param request request to get node attributes collection of this cluster.
+ @return Response containing node attributes collection.
+ @throws YarnException if any error happens inside YARN.
+ @throws IOException   in case of other errors.]]>
+      </doc>
+    </method>
+    <method name="getNodesToAttributes" return="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get node to attributes mappings
+ in existing cluster.
+ </p>
+
+ @param request request to get nodes to attributes mapping.
+ @return nodes to attributes mappings.
+ @throws YarnException if any error happens inside YARN.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The protocol between clients and the <code>ResourceManager</code>
+ to submit/abort jobs and to get information on applications, cluster metrics,
+ nodes, queues and ACLs.</p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ApplicationClientProtocol -->
+  <!-- start interface org.apache.hadoop.yarn.api.ApplicationConstants -->
+  <interface name="ApplicationConstants"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <field name="APP_SUBMIT_TIME_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The environment variable for APP_SUBMIT_TIME. Set in AppMaster environment
+ only]]>
+      </doc>
+    </field>
+    <field name="CONTAINER_TOKEN_FILE_ENV_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The cache file into which container token is written]]>
+      </doc>
+    </field>
+    <field name="KEYSTORE_FILE_LOCATION_ENV_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The file into which the keystore containing the AM's certificate is
+ written.]]>
+      </doc>
+    </field>
+    <field name="KEYSTORE_PASSWORD_ENV_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The password for the AM's keystore.]]>
+      </doc>
+    </field>
+    <field name="TRUSTSTORE_FILE_LOCATION_ENV_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The file into which the truststore containing the AM's certificate is
+ written.]]>
+      </doc>
+    </field>
+    <field name="TRUSTSTORE_PASSWORD_ENV_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The password for the AM's truststore.]]>
+      </doc>
+    </field>
+    <field name="APPLICATION_WEB_PROXY_BASE_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The environmental variable for APPLICATION_WEB_PROXY_BASE. Set in
+ ApplicationMaster's environment only. This states that for all non-relative
+ web URLs in the app masters web UI what base should they have.]]>
+      </doc>
+    </field>
+    <field name="LOG_DIR_EXPANSION_VAR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The temporary environmental variable for container log directory. This
+ should be replaced by real container log directory on container launch.]]>
+      </doc>
+    </field>
+    <field name="CLASS_PATH_SEPARATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This constant is used to construct class path and it will be replaced with
+ real class path separator(':' for Linux and ';' for Windows) by
+ NodeManager on container launch. User has to use this constant to construct
+ class path if user wants cross-platform practice i.e. submit an application
+ from a Windows client to a Linux/Unix server or vice versa.]]>
+      </doc>
+    </field>
+    <field name="PARAMETER_EXPANSION_LEFT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The following two constants are used to expand parameter and it will be
+ replaced with real parameter expansion marker ('%' for Windows and '$' for
+ Linux) by NodeManager on container launch. For example: {{VAR}} will be
+ replaced as $VAR on Linux, and %VAR% on Windows. User has to use this
+ constant to construct class path if user wants cross-platform practice i.e.
+ submit an application from a Windows client to a Linux/Unix server or vice
+ versa.]]>
+      </doc>
+    </field>
+    <field name="PARAMETER_EXPANSION_RIGHT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[User has to use this constant to construct class path if user wants
+ cross-platform practice i.e. submit an application from a Windows client to
+ a Linux/Unix server or vice versa.]]>
+      </doc>
+    </field>
+    <field name="STDERR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="STDOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This is the API for the applications comprising of constants that YARN sets
+ up for the applications and the containers.
+
+ TODO: Investigate the semantics and security of each cross-boundary refs.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ApplicationConstants -->
+  <!-- start class org.apache.hadoop.yarn.api.ApplicationConstants.ContainerLaunchType -->
+  <class name="ApplicationConstants.ContainerLaunchType" extends="java.lang.Enum"
+    abstract="false"
+    static="true" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.ApplicationConstants.ContainerLaunchType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.ApplicationConstants.ContainerLaunchType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[The type of launch for the container.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.ApplicationConstants.ContainerLaunchType -->
+  <!-- start interface org.apache.hadoop.yarn.api.ApplicationHistoryProtocol -->
+  <interface name="ApplicationHistoryProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.api.ApplicationBaseProtocol"/>
+    <doc>
+    <![CDATA[<p>
+ The protocol between clients and the <code>ApplicationHistoryServer</code> to
+ get the information of completed applications etc.
+ </p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ApplicationHistoryProtocol -->
+  <!-- start interface org.apache.hadoop.yarn.api.ApplicationMasterProtocol -->
+  <interface name="ApplicationMasterProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by a new <code>ApplicationMaster</code> to register with
+ the <code>ResourceManager</code>.
+ </p>
+
+ <p>
+ The <code>ApplicationMaster</code> needs to provide details such as RPC
+ Port, HTTP tracking url etc. as specified in
+ {@link RegisterApplicationMasterRequest}.
+ </p>
+
+ <p>
+ The <code>ResourceManager</code> responds with critical details such as
+ maximum resource capabilities in the cluster as specified in
+ {@link RegisterApplicationMasterResponse}.
+ </p>
+
+ <p>
+ Re-register is only allowed for <code>Unmanaged Application Master</code>
+ (UAM) HA, with
+ {@link org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext#getKeepContainersAcrossApplicationAttempts()}
+ set to true.
+ </p>
+
+ @param request registration request
+ @return registration response
+ @throws YarnException
+ @throws IOException
+ @throws InvalidApplicationMasterRequestException The exception is thrown
+           when an ApplicationMaster tries to register more than once.
+ @see RegisterApplicationMasterRequest
+ @see RegisterApplicationMasterResponse]]>
+      </doc>
+    </method>
+    <method name="finishApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>The interface used by an <code>ApplicationMaster</code> to notify the 
+ <code>ResourceManager</code> about its completion (success or failed).</p>
+ 
+ <p>The <code>ApplicationMaster</code> has to provide details such as 
+ final state, diagnostics (in case of failures) etc. as specified in 
+ {@link FinishApplicationMasterRequest}.</p>
+ 
+ <p>The <code>ResourceManager</code> responds with 
+ {@link FinishApplicationMasterResponse}.</p>
+ 
+ @param request completion request
+ @return completion response
+ @throws YarnException
+ @throws IOException
+ @see FinishApplicationMasterRequest
+ @see FinishApplicationMasterResponse]]>
+      </doc>
+    </method>
+    <method name="allocate" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The main interface between an <code>ApplicationMaster</code> and the
+ <code>ResourceManager</code>.
+ </p>
+ 
+ <p>
+ The <code>ApplicationMaster</code> uses this interface to provide a list of
+ {@link ResourceRequest} and returns unused {@link Container} allocated to
+ it via {@link AllocateRequest}. Optionally, the
+ <code>ApplicationMaster</code> can also <em>blacklist</em> resources which
+ it doesn't want to use.
+ </p>
+ 
+ <p>
+ This also doubles up as a <em>heartbeat</em> to let the
+ <code>ResourceManager</code> know that the <code>ApplicationMaster</code>
+ is alive. Thus, applications should periodically make this call to be kept
+ alive. The frequency depends on
+ {@link YarnConfiguration#RM_AM_EXPIRY_INTERVAL_MS} which defaults to
+ {@link YarnConfiguration#DEFAULT_RM_AM_EXPIRY_INTERVAL_MS}.
+ </p>
+ 
+ <p>
+ The <code>ResourceManager</code> responds with list of allocated
+ {@link Container}, status of completed containers and headroom information
+ for the application.
+ </p>
+ 
+ <p>
+ The <code>ApplicationMaster</code> can use the available headroom
+ (resources) to decide how to utilize allocated resources and make informed
+ decisions about future resource requests.
+ </p>
+ 
+ @param request
+          allocation request
+ @return allocation response
+ @throws YarnException
+ @throws IOException
+ @throws InvalidApplicationMasterRequestException
+           This exception is thrown when an ApplicationMaster calls allocate
+           without registering first.
+ @throws InvalidResourceBlacklistRequestException
+           This exception is thrown when an application provides an invalid
+           specification for blacklist of resources.
+ @throws InvalidResourceRequestException
+           This exception is thrown when a {@link ResourceRequest} is out of
+           the range of the configured lower and upper limits on the
+           resources.
+ @see AllocateRequest
+ @see AllocateResponse]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The protocol between a live instance of <code>ApplicationMaster</code> 
+ and the <code>ResourceManager</code>.</p>
+ 
+ <p>This is used by the <code>ApplicationMaster</code> to register/unregister
+ and to request and obtain resources in the cluster from the
+ <code>ResourceManager</code>.</p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ApplicationMasterProtocol -->
+  <!-- start interface org.apache.hadoop.yarn.api.ClientSCMProtocol -->
+  <interface name="ClientSCMProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="use" return="org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to claim a resource with the
+ <code>SharedCacheManager.</code> The client uses a checksum to identify the
+ resource and an {@link ApplicationId} to identify which application will be
+ using the resource.
+ </p>
+
+ <p>
+ The <code>SharedCacheManager</code> responds with whether or not the
+ resource exists in the cache. If the resource exists, a <code>Path</code>
+ to the resource in the shared cache is returned. If the resource does not
+ exist, the response is empty.
+ </p>
+
+ @param request request to claim a resource in the shared cache
+ @return response indicating if the resource is already in the cache
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="release" return="org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to release a resource with the
+ <code>SharedCacheManager.</code> This method is called once an application
+ is no longer using a claimed resource in the shared cache. The client uses
+ a checksum to identify the resource and an {@link ApplicationId} to
+ identify which application is releasing the resource.
+ </p>
+
+ <p>
+ Note: This method is an optimization and the client is not required to call
+ it for correctness.
+ </p>
+
+ <p>
+ Currently the <code>SharedCacheManager</code> sends an empty response.
+ </p>
+
+ @param request request to release a resource in the shared cache
+ @return (empty) response on releasing the resource
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The protocol between clients and the <code>SharedCacheManager</code> to claim
+ and release resources in the shared cache.
+ </p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ClientSCMProtocol -->
+  <!-- start interface org.apache.hadoop.yarn.api.ContainerManagementProtocol -->
+  <interface name="ContainerManagementProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="startContainers" return="org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The <code>ApplicationMaster</code> provides a list of
+ {@link StartContainerRequest}s to a <code>NodeManager</code> to
+ <em>start</em> {@link Container}s allocated to it using this interface.
+ </p>
+ 
+ <p>
+ The <code>ApplicationMaster</code> has to provide details such as allocated
+ resource capability, security tokens (if enabled), command to be executed
+ to start the container, environment for the process, necessary
+ binaries/jar/shared-objects etc. via the {@link ContainerLaunchContext} in
+ the {@link StartContainerRequest}.
+ </p>
+ 
+ <p>
+ The <code>NodeManager</code> sends a response via
+ {@link StartContainersResponse} which includes a list of
+ {@link Container}s of successfully launched {@link Container}s, a
+ containerId-to-exception map for each failed {@link StartContainerRequest} in
+ which the exception indicates errors from per container and an
+ allServicesMetaData map between the names of auxiliary services and their
+ corresponding meta-data. Note: Non-container-specific exceptions will
+ still be thrown by the API method itself.
+ </p>
+ <p>
+ The <code>ApplicationMaster</code> can use
+ {@link #getContainerStatuses(GetContainerStatusesRequest)} to get updated
+ statuses of the to-be-launched or launched containers.
+ </p>
+ 
+ @param request
+          request to start a list of containers
+ @return response including containerIds of all successfully launched
+         containers, a containerId-to-exception map for failed requests and
+         an allServicesMetaData map.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="stopContainers" return="org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The <code>ApplicationMaster</code> requests a <code>NodeManager</code> to
+ <em>stop</em> a list of {@link Container}s allocated to it using this
+ interface.
+ </p>
+ 
+ <p>
+ The <code>ApplicationMaster</code> sends a {@link StopContainersRequest}
+ which includes the {@link ContainerId}s of the containers to be stopped.
+ </p>
+ 
+ <p>
+ The <code>NodeManager</code> sends a response via
+ {@link StopContainersResponse} which includes a list of {@link ContainerId}
+ s of successfully stopped containers, a containerId-to-exception map for
+ each failed request in which the exception indicates errors from per
+ container. Note: Non-container-specific exceptions will still be thrown by
+ the API method itself. <code>ApplicationMaster</code> can use
+ {@link #getContainerStatuses(GetContainerStatusesRequest)} to get updated
+ statuses of the containers.
+ </p>
+ 
+ @param request
+          request to stop a list of containers
+ @return response which includes a list of containerIds of successfully
+         stopped containers, a containerId-to-exception map for failed
+         requests.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerStatuses" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The API used by the <code>ApplicationMaster</code> to request for current
+ statuses of <code>Container</code>s from the <code>NodeManager</code>.
+ </p>
+ 
+ <p>
+ The <code>ApplicationMaster</code> sends a
+ {@link GetContainerStatusesRequest} which includes the {@link ContainerId}s
+ of all containers whose statuses are needed.
+ </p>
+ 
+ <p>
+ The <code>NodeManager</code> responds with
+ {@link GetContainerStatusesResponse} which includes a list of
+ {@link ContainerStatus} of the successfully queried containers and a
+ containerId-to-exception map for each failed request in which the exception
+ indicates errors from per container. Note: Non-container-specific
+ exceptions will still be thrown by the API method itself.
+ </p>
+ 
+ @param request
+          request to get <code>ContainerStatus</code>es of containers with
+          the specified <code>ContainerId</code>s
+ @return response containing the list of <code>ContainerStatus</code> of the
+         successfully queried containers and a containerId-to-exception map
+         for failed requests.
+ 
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="increaseContainersResource" return="org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The API used by the <code>ApplicationMaster</code> to request for
+ resource increase of running containers on the <code>NodeManager</code>.
+ </p>
+
+ @param request
+         request to increase resource of a list of containers
+ @return response which includes a list of containerIds of containers
+         whose resource has been successfully increased and a
+         containerId-to-exception map for failed requests.
+
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="updateContainer" return="org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The API used by the <code>ApplicationMaster</code> to request for
+ resource update of running containers on the <code>NodeManager</code>.
+ </p>
+
+ @param request
+         request to update resource of a list of containers
+ @return response which includes a list of containerIds of containers
+         whose resource has been successfully updated and a
+         containerId-to-exception map for failed requests.
+
+ @throws YarnException Exception specific to YARN
+ @throws IOException IOException thrown from NodeManager]]>
+      </doc>
+    </method>
+    <method name="signalToContainer" return="org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="localize" return="org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Localize resources required by the container.
+ Currently, this API only works for running containers.
+
+ @param request Specify the resources to be localized.
+ @return Response that the localize request is accepted.
+ @throws YarnException Exception specific to YARN
+ @throws IOException IOException thrown from the RPC layer.]]>
+      </doc>
+    </method>
+    <method name="reInitializeContainer" return="org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[ReInitialize the Container with a new Launch Context.
+ @param request Specify the new ContainerLaunchContext.
+ @return Response that the ReInitialize request is accepted.
+ @throws YarnException Exception specific to YARN.
+ @throws IOException IOException thrown from the RPC layer.]]>
+      </doc>
+    </method>
+    <method name="restartContainer" return="org.apache.hadoop.yarn.api.protocolrecords.RestartContainerResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Restart the container.
+ @param containerId Container Id.
+ @return Response that the restart request is accepted.
+ @throws YarnException Exception specific to YARN.
+ @throws IOException IOException thrown from the RPC layer.]]>
+      </doc>
+    </method>
+    <method name="rollbackLastReInitialization" return="org.apache.hadoop.yarn.api.protocolrecords.RollbackResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Rollback the Last ReInitialization if possible.
+ @param containerId Container Id.
+ @return Response that the rollback request is accepted.
+ @throws YarnException Exception specific to YARN.
+ @throws IOException IOException thrown from the RPC layer.]]>
+      </doc>
+    </method>
+    <method name="commitLastReInitialization" return="org.apache.hadoop.yarn.api.protocolrecords.CommitResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Commit the Last ReInitialization if possible. Once the reinitialization
+ has been committed, It cannot be rolled back.
+ @param containerId Container Id.
+ @return Response that the commit request is accepted.
+ @throws YarnException Exception specific to YARN.
+ @throws IOException IOException thrown from the RPC layer.]]>
+      </doc>
+    </method>
+    <method name="getLocalizationStatuses" return="org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[API to request for the localization statuses of requested containers from
+ the Node Manager.
+ @param request {@link GetLocalizationStatusesRequest} which includes the
+                container ids of all the containers whose localization
+                statuses are needed.
+ @return {@link GetLocalizationStatusesResponse} which contains the
+         localization statuses of all the requested containers.
+ @throws YarnException Exception specific to YARN.
+ @throws IOException IOException thrown from the RPC layer.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The protocol between an <code>ApplicationMaster</code> and a 
+ <code>NodeManager</code> to start/stop and increase resource of containers
+ and to get status of running containers.</p>
+
+ <p>If security is enabled the <code>NodeManager</code> verifies that the
+ <code>ApplicationMaster</code> has truly been allocated the container
+ by the <code>ResourceManager</code> and also verifies all interactions such 
+ as stopping the container or obtaining status information for the container.
+ </p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.api.ContainerManagementProtocol -->
+</package>
+<package name="org.apache.hadoop.yarn.api.protocolrecords">
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest -->
+  <class name="AllocateRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AllocateRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseID" type="int"/>
+      <param name="appProgress" type="float"/>
+      <param name="resourceAsk" type="java.util.List"/>
+      <param name="containersToBeReleased" type="java.util.List"/>
+      <param name="resourceBlacklistRequest" type="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseID" type="int"/>
+      <param name="appProgress" type="float"/>
+      <param name="resourceAsk" type="java.util.List"/>
+      <param name="containersToBeReleased" type="java.util.List"/>
+      <param name="resourceBlacklistRequest" type="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"/>
+      <param name="trackingUrl" type="java.lang.String"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseID" type="int"/>
+      <param name="appProgress" type="float"/>
+      <param name="resourceAsk" type="java.util.List"/>
+      <param name="containersToBeReleased" type="java.util.List"/>
+      <param name="updateRequests" type="java.util.List"/>
+      <param name="resourceBlacklistRequest" type="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"/>
+    </method>
+    <method name="getResponseId" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>response id</em> used to track duplicate responses.
+ @return <em>response id</em>]]>
+      </doc>
+    </method>
+    <method name="setResponseId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="id" type="int"/>
+      <doc>
+      <![CDATA[Set the <em>response id</em> used to track duplicate responses.
+ @param id <em>response id</em>]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>current progress</em> of application. 
+ @return <em>current progress</em> of application]]>
+      </doc>
+    </method>
+    <method name="setProgress"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="progress" type="float"/>
+      <doc>
+      <![CDATA[Set the <em>current progress</em> of application
+ @param progress <em>current progress</em> of application]]>
+      </doc>
+    </method>
+    <method name="getAskList" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <code>ResourceRequest</code> to update the 
+ <code>ResourceManager</code> about the application's resource requirements.
+ @return the list of <code>ResourceRequest</code>
+ @see ResourceRequest]]>
+      </doc>
+    </method>
+    <method name="setAskList"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceRequests" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set list of <code>ResourceRequest</code> to update the
+ <code>ResourceManager</code> about the application's resource requirements.
+ @param resourceRequests list of <code>ResourceRequest</code> to update the 
+                        <code>ResourceManager</code> about the application's 
+                        resource requirements
+ @see ResourceRequest]]>
+      </doc>
+    </method>
+    <method name="getReleaseList" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <code>ContainerId</code> of containers being 
+ released by the <code>ApplicationMaster</code>.
+ @return list of <code>ContainerId</code> of containers being 
+         released by the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="setReleaseList"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="releaseContainers" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the list of <code>ContainerId</code> of containers being
+ released by the <code>ApplicationMaster</code>
+ @param releaseContainers list of <code>ContainerId</code> of 
+                          containers being released by the 
+                          <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="getResourceBlacklistRequest" return="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ResourceBlacklistRequest</code> being sent by the 
+ <code>ApplicationMaster</code>.
+ @return the <code>ResourceBlacklistRequest</code> being sent by the 
+         <code>ApplicationMaster</code>
+ @see ResourceBlacklistRequest]]>
+      </doc>
+    </method>
+    <method name="setResourceBlacklistRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceBlacklistRequest" type="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"/>
+      <doc>
+      <![CDATA[Set the <code>ResourceBlacklistRequest</code> to inform the 
+ <code>ResourceManager</code> about the blacklist additions and removals
+ per the <code>ApplicationMaster</code>.
+ 
+ @param resourceBlacklistRequest the <code>ResourceBlacklistRequest</code>  
+                         to inform the <code>ResourceManager</code> about  
+                         the blacklist additions and removals
+                         per the <code>ApplicationMaster</code>
+ @see ResourceBlacklistRequest]]>
+      </doc>
+    </method>
+    <method name="getUpdateRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of container update requests being sent by the
+ <code>ApplicationMaster</code>.
+ @return list of {@link UpdateContainerRequest}
+         being sent by the
+         <code>ApplicationMaster</code>.]]>
+      </doc>
+    </method>
+    <method name="setUpdateRequests"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="updateRequests" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the list of container update requests to inform the
+ <code>ResourceManager</code> about the containers that need to be
+ updated.
+ @param updateRequests list of <code>UpdateContainerRequest</code> for
+                       containers to be updated]]>
+      </doc>
+    </method>
+    <method name="getSchedulingRequests" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of Scheduling requests being sent by the
+ <code>ApplicationMaster</code>.
+ @return list of {@link SchedulingRequest} being sent by the
+         <code>ApplicationMaster</code>.]]>
+      </doc>
+    </method>
+    <method name="setSchedulingRequests"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="schedulingRequests" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the list of Scheduling requests to inform the
+ <code>ResourceManager</code> about the application's resource requirements
+ (potentially including allocation tags and placement constraints).
+ @param schedulingRequests list of {@link SchedulingRequest} to update
+          the <code>ResourceManager</code> about the application's resource
+          requirements.]]>
+      </doc>
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the tracking url update for this heartbeat.
+ @return tracking url to update this application with]]>
+      </doc>
+    </method>
+    <method name="setTrackingUrl"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="trackingUrl" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the new tracking url for this application.
+ @param trackingUrl the new tracking url]]>
+      </doc>
+    </method>
+    <method name="newBuilder" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.AllocateRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[<p>The core request sent by the <code>ApplicationMaster</code> to the 
+ <code>ResourceManager</code> to obtain resources in the cluster.</p> 
+
+ <p>The request includes:
+ <ul>
+   <li>A response id to track duplicate responses.</li>
+   <li>Progress information.</li>
+   <li>
+     A list of {@link ResourceRequest} to inform the
+     <code>ResourceManager</code> about the application's
+     resource requirements.
+   </li>
+   <li>
+     A list of unused {@link Container} which are being returned.
+   </li>
+   <li>
+     A list of {@link UpdateContainerRequest} to inform
+     the <code>ResourceManager</code> about the change in
+     requirements of running containers.
+   </li>
+ </ul>
+ 
+ @see ApplicationMasterProtocol#allocate(AllocateRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.AllocateRequestBuilder -->
+  <class name="AllocateRequest.AllocateRequestBuilder" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="responseId" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.AllocateRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+      <doc>
+      <![CDATA[Set the <code>responseId</code> of the request.
+ @see AllocateRequest#setResponseId(int)
+ @param responseId <code>responseId</code> of the request
+ @return {@link AllocateRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="progress" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.AllocateRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="progress" type="float"/>
+      <doc>
+      <![CDATA[Set the <code>progress</code> of the request.
+ @see AllocateRequest#setProgress(float)
+ @param progress <code>progress</code> of the request
+ @return {@link AllocateRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="askList" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.AllocateRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="askList" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the <code>askList</code> of the request.
+ @see AllocateRequest#setAskList(List)
+ @param askList <code>askList</code> of the request
+ @return {@link AllocateRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="releaseList" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.AllocateRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="releaseList" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the <code>releaseList</code> of the request.
+ @see AllocateRequest#setReleaseList(List)
+ @param releaseList <code>releaseList</code> of the request
+ @return {@link AllocateRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="resourceBlacklistRequest" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.AllocateRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceBlacklistRequest" type="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"/>
+      <doc>
+      <![CDATA[Set the <code>resourceBlacklistRequest</code> of the request.
+ @see AllocateRequest#setResourceBlacklistRequest(
+ ResourceBlacklistRequest)
+ @param resourceBlacklistRequest
+     <code>resourceBlacklistRequest</code> of the request
+ @return {@link AllocateRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="updateRequests" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.AllocateRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="updateRequests" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the <code>updateRequests</code> of the request.
+ @see AllocateRequest#setUpdateRequests(List)
+ @param updateRequests <code>updateRequests</code> of the request
+ @return {@link AllocateRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="schedulingRequests" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.AllocateRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="schedulingRequests" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the <code>schedulingRequests</code> of the request.
+ @see AllocateRequest#setSchedulingRequests(List)
+ @param schedulingRequests <code>SchedulingRequest</code> of the request
+ @return {@link AllocateRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="trackingUrl" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.AllocateRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="trackingUrl" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <code>trackingUrl</code> of the request.
+ @see AllocateRequest#setTrackingUrl(String)
+ @param trackingUrl new tracking url
+ @return {@link AllocateRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="build" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return generated {@link AllocateRequest} object.
+ @return {@link AllocateRequest}]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Class to construct instances of {@link AllocateRequest} with specific
+ options.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest.AllocateRequestBuilder -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse -->
+  <class name="AllocateResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AllocateResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+      <param name="completedContainers" type="java.util.List"/>
+      <param name="allocatedContainers" type="java.util.List"/>
+      <param name="updatedNodes" type="java.util.List"/>
+      <param name="availResources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="command" type="org.apache.hadoop.yarn.api.records.AMCommand"/>
+      <param name="numClusterNodes" type="int"/>
+      <param name="preempt" type="org.apache.hadoop.yarn.api.records.PreemptionMessage"/>
+      <param name="nmTokens" type="java.util.List"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="responseId" type="int"/>
+      <param name="completedContainers" type="java.util.List"/>
+      <param name="allocatedContainers" type="java.util.List"/>
+      <param name="updatedNodes" type="java.util.List"/>
+      <param name="availResources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="command" type="org.apache.hadoop.yarn.api.records.AMCommand"/>
+      <param name="numClusterNodes" type="int"/>
+      <param name="preempt" type="org.apache.hadoop.yarn.api.records.PreemptionMessage"/>
+      <param name="nmTokens" type="java.util.List"/>
+      <param name="amRMToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <param name="updatedContainers" type="java.util.List"/>
+      <param name="collectorInfo" type="org.apache.hadoop.yarn.api.records.CollectorInfo"/>
+    </method>
+    <method name="getAMCommand" return="org.apache.hadoop.yarn.api.records.AMCommand"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If the <code>ResourceManager</code> needs the
+ <code>ApplicationMaster</code> to take some action then it will send an
+ AMCommand to the <code>ApplicationMaster</code>. See <code>AMCommand</code> 
+ for details on commands and actions for them.
+ @return <code>AMCommand</code> if the <code>ApplicationMaster</code> should
+         take action, <code>null</code> otherwise
+ @see AMCommand]]>
+      </doc>
+    </method>
+    <method name="getResponseId" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>last response id</em>.
+ @return <em>last response id</em>]]>
+      </doc>
+    </method>
+    <method name="getAllocatedContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <em>newly allocated</em> <code>Container</code> by the
+ <code>ResourceManager</code>.
+ @return list of <em>newly allocated</em> <code>Container</code>]]>
+      </doc>
+    </method>
+    <method name="getAvailableResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>available headroom</em> for resources in the cluster for the
+ application.
+ @return limit of available headroom for resources in the cluster for the
+ application]]>
+      </doc>
+    </method>
+    <method name="getCompletedContainersStatuses" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <em>completed containers' statuses</em>.
+ @return the list of <em>completed containers' statuses</em>]]>
+      </doc>
+    </method>
+    <method name="getUpdatedNodes" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <em>updated <code>NodeReport</code>s</em>. Updates could
+ be changes in health, availability etc of the nodes.
+ @return The delta of updated nodes since the last response]]>
+      </doc>
+    </method>
+    <method name="getNumClusterNodes" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of hosts available on the cluster.
+ @return the available host count.]]>
+      </doc>
+    </method>
+    <method name="getPreemptionMessage" return="org.apache.hadoop.yarn.api.records.PreemptionMessage"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the description of containers owned by the AM, but requested back by
+ the cluster. Note that the RM may have an inconsistent view of the
+ resources owned by the AM. These messages are advisory, and the AM may
+ elect to ignore them.
+ <p>
+ The message is a snapshot of the resources the RM wants back from the AM.
+ While demand persists, the RM will repeat its request; applications should
+ not interpret each message as a request for <em>additional</em>
+ resources on top of previous messages. Resources requested consistently
+ over some duration may be forcibly killed by the RM.
+
+ @return A specification of the resources to reclaim from this AM.]]>
+      </doc>
+    </method>
+    <method name="getNMTokens" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of NMTokens required for communicating with NM. New NMTokens
+ issued only if
+ <p>
+ 1) AM is receiving first container on underlying NodeManager.<br>
+ OR<br>
+ 2) NMToken master key rolled over in ResourceManager and AM is getting new
+ container on the same underlying NodeManager.
+ <p>
+ AM will receive one NMToken per NM irrespective of the number of containers
+ issued on same NM. AM is expected to store these tokens until issued a
+ new token for the same NM.
+ @return list of NMTokens required for communicating with NM]]>
+      </doc>
+    </method>
+    <method name="getUpdatedContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of newly updated containers by
+ <code>ResourceManager</code>.
+ @return list of newly increased containers]]>
+      </doc>
+    </method>
+    <method name="getAMRMToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The AMRMToken that belongs to this attempt
+
+ @return The AMRMToken that belongs to this attempt]]>
+      </doc>
+    </method>
+    <method name="getApplicationPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Priority of the application
+
+ @return get application priority]]>
+      </doc>
+    </method>
+    <method name="getCollectorInfo" return="org.apache.hadoop.yarn.api.records.CollectorInfo"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The data associated with the collector that belongs to this app. Contains
+ address and token along with identification information.
+
+ @return The data of collector that belong to this attempt]]>
+      </doc>
+    </method>
+    <method name="getUpdateErrors" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of container update errors to inform the
+ Application Master about the container updates that could not be
+ satisfied due to error.
+
+ @return List of Update Container Errors.]]>
+      </doc>
+    </method>
+    <method name="setUpdateErrors"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="updateErrors" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the list of container update errors to inform the
+ Application Master about the container updates that could not be
+ satisfied due to error.
+ @param updateErrors list of <code>UpdateContainerError</code> for
+                       containers updates requests that were in error]]>
+      </doc>
+    </method>
+    <method name="getContainersFromPreviousAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of running containers as viewed by
+ <code>ResourceManager</code> from previous application attempts which
+ have not been reported to the Application Master yet.
+ <br>
+ These containers were recovered by the RM after the application master
+ had already registered. This may happen after RM restart when some NMs get
+ delayed in connecting to the RM and reporting the active containers.
+ Since they were not reported in the registration
+ response, they are reported in the response to the AM heartbeat.
+
+ @return the list of running containers as viewed by
+         <code>ResourceManager</code> from previous application attempts.]]>
+      </doc>
+    </method>
+    <method name="getRejectedSchedulingRequests" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of all SchedulingRequests that the RM has rejected between
+ this allocate call and the previous one.
+ @return List of RejectedSchedulingRequests.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The response sent by the <code>ResourceManager</code> to the
+ <code>ApplicationMaster</code> during resource negotiation.
+ <p>
+ The response, includes:
+ <ul>
+   <li>Response ID to track duplicate responses.</li>
+   <li>
+     An AMCommand sent by ResourceManager to let the
+     {@code ApplicationMaster} take some actions (resync, shutdown etc.).
+   </li>
+   <li>A list of newly allocated {@link Container}.</li>
+   <li>A list of completed {@link Container}s' statuses.</li>
+   <li>
+     The available headroom for resources in the cluster for the
+     application.
+   </li>
+   <li>A list of nodes whose status has been updated.</li>
+   <li>The number of available nodes in a cluster.</li>
+   <li>A description of resources requested back by the cluster</li>
+   <li>AMRMToken, if AMRMToken has been rolled over</li>
+   <li>
+     A list of {@link Container} representing the containers
+     whose resource has been increased.
+   </li>
+   <li>
+     A list of {@link Container} representing the containers
+     whose resource has been decreased.
+   </li>
+ </ul>
+ 
+ @see ApplicationMasterProtocol#allocate(AllocateRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope -->
+  <class name="ApplicationsRequestScope" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Enumeration that controls the scope of applications fetched]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.CommitResponse -->
+  <class name="CommitResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CommitResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Response to Commit Container Request.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.CommitResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest -->
+  <class name="ContainerUpdateRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerUpdateRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containersToIncrease" type="java.util.List"/>
+    </method>
+    <method name="getContainersToUpdate" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of container tokens to be used for authorization during
+ container resource update.
+ <p>
+ Note: {@link NMToken} will be used for authenticating communication with
+ {@code NodeManager}.
+ @return the list of container tokens to be used for authorization during
+ container resource update.
+ @see NMToken]]>
+      </doc>
+    </method>
+    <method name="setContainersToUpdate"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containersToUpdate" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set container tokens to be used during container resource increase.
+ The token is acquired from
+ <code>AllocateResponse.getUpdatedContainers</code>.
+ The token contains the container id and resource capability required for
+ container resource update.
+ @param containersToUpdate the list of container tokens to be used
+                             for container resource increase.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by <code>Application Master</code> to the
+ <code>Node Manager</code> to change the resource quota of a container.</p>
+
+ @see ContainerManagementProtocol#updateContainer(ContainerUpdateRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse -->
+  <class name="ContainerUpdateResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerUpdateResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="successfullyUpdatedContainers" type="java.util.List"/>
+      <param name="failedRequests" type="java.util.Map"/>
+    </method>
+    <method name="getSuccessfullyUpdatedContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of containerIds of containers whose resource
+ have been successfully updated.
+
+ @return the list of containerIds of containers whose resource have
+ been successfully updated.]]>
+      </doc>
+    </method>
+    <method name="getFailedRequests" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the containerId-to-exception map in which the exception indicates
+ error from each container for failed requests.
+ @return map of containerId-to-exception]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>NodeManager</code> to the
+ <code>ApplicationMaster</code> when asked to update container resource.
+ </p>
+
+ @see ContainerManagementProtocol#updateContainer(ContainerUpdateRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest -->
+  <class name="FailApplicationAttemptRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FailApplicationAttemptRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+    </method>
+    <method name="getApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationAttemptId</code> of the attempt to be failed.
+ @return <code>ApplicationAttemptId</code> of the attempt.]]>
+      </doc>
+    </method>
+    <method name="setApplicationAttemptId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by the client to the <code>ResourceManager</code>
+ to fail an application attempt.</p>
+
+ <p>The request includes the {@link ApplicationAttemptId} of the attempt to
+ be failed.</p>
+
+ @see ApplicationClientProtocol#failApplicationAttempt(FailApplicationAttemptRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse -->
+  <class name="FailApplicationAttemptResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FailApplicationAttemptResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[<p>The response sent by the <code>ResourceManager</code> to the client
+ failing an application attempt.</p>
+
+ <p>Currently it's empty.</p>
+
+ @see ApplicationClientProtocol#failApplicationAttempt(FailApplicationAttemptRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.FailApplicationAttemptResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest -->
+  <class name="FinishApplicationMasterRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FinishApplicationMasterRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="finalAppStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="diagnostics" type="java.lang.String"/>
+      <param name="url" type="java.lang.String"/>
+    </method>
+    <method name="getFinalApplicationStatus" return="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>final state</em> of the <code>ApplicationMaster</code>.
+ @return <em>final state</em> of the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="setFinalApplicationStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="finalState" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <doc>
+      <![CDATA[Set the <em>final state</em> of the <code>ApplicationMaster</code>
+ @param finalState <em>final state</em> of the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="getDiagnostics" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>diagnostic information</em> on application failure.
+ @return <em>diagnostic information</em> on application failure]]>
+      </doc>
+    </method>
+    <method name="setDiagnostics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="diagnostics" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set <em>diagnostic information</em> on application failure.
+ @param diagnostics <em>diagnostic information</em> on application failure]]>
+      </doc>
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>tracking URL</em> for the <code>ApplicationMaster</code>.
+ If this URL contains a scheme then it will be used by the resource manager
+ web application proxy, otherwise it will default to http.
+ @return <em>tracking URL</em>for the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="setTrackingUrl"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <em>final tracking URL</em>for the <code>ApplicationMaster</code>.
+ This is the web-URL to which ResourceManager or web-application proxy will
+ redirect client/users once the application is finished and the
+ <code>ApplicationMaster</code> is gone.
+ <p>
+ If the passed url has a scheme then that will be used by the
+ ResourceManager and web-application proxy, otherwise the scheme will
+ default to http.
+ </p>
+ <p>
+ Empty, null, "N/A" strings are all valid besides a real URL. In case an url
+ isn't explicitly passed, it defaults to "N/A" on the ResourceManager.
+ <p>
+
+ @param url
+          <em>tracking URL</em>for the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The finalization request sent by the {@code ApplicationMaster} to
+ inform the {@code ResourceManager} about its completion.
+ <p>
+ The final request includes details such as:
+ <ul>
+   <li>Final state of the {@code ApplicationMaster}</li>
+   <li>
+     Diagnostic information in case of failure of the
+     {@code ApplicationMaster}
+   </li>
+   <li>Tracking URL</li>
+ </ul>
+
+ @see ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse -->
+  <class name="FinishApplicationMasterResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FinishApplicationMasterResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getIsUnregistered" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the flag which indicates that the application has successfully
+ unregistered with the RM and the application can safely stop.
+ @return true if the application has unregistered with the RM,
+         false otherwise]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The response sent by the <code>ResourceManager</code> to a
+ <code>ApplicationMaster</code> on its completion.
+ <p>
+ The response, includes:
+ <ul>
+ <li>A flag which indicates that the application has successfully unregistered
+ with the RM and the application can safely stop.</li>
+ </ul>
+ <p>
+ Note: The flag indicates whether the application has successfully
+ unregistered and is safe to stop. The application may stop after the flag is
+ true. If the application stops before the flag is true then the RM may retry
+ the application.
+ 
+ @see ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest -->
+  <class name="GetAllResourceProfilesRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetAllResourceProfilesRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Request class for getting all the resource profiles from the RM.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse -->
+  <class name="GetAllResourceProfilesResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetAllResourceProfilesResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setResourceProfiles"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="profiles" type="java.util.Map"/>
+    </method>
+    <method name="getResourceProfiles" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Response class for getting all the resource profiles from the RM.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceProfilesResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest -->
+  <class name="GetAllResourceTypeInfoRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetAllResourceTypeInfoRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Request class for getting all the resource type information from the RM.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse -->
+  <class name="GetAllResourceTypeInfoResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetAllResourceTypeInfoResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setResourceTypeInfo"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceTypes" type="java.util.List"/>
+    </method>
+    <method name="getResourceTypeInfo" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Response class for getting all the resource type information from the RM.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetAllResourceTypeInfoResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest -->
+  <class name="GetApplicationAttemptReportRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationAttemptReportRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+    </method>
+    <method name="getApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationAttemptId</code> of an application attempt.
+ 
+ @return <code>ApplicationAttemptId</code> of an application attempt]]>
+      </doc>
+    </method>
+    <method name="setApplicationAttemptId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationAttemptId</code> of an application attempt
+ 
+ @param applicationAttemptId
+          <code>ApplicationAttemptId</code> of an application attempt]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request sent by a client to the <code>ResourceManager</code> to get an
+ {@link ApplicationAttemptReport} for an application attempt.
+ </p>
+ 
+ <p>
+ The request should include the {@link ApplicationAttemptId} of the
+ application attempt.
+ </p>
+ 
+ @see ApplicationAttemptReport
+ @see ApplicationHistoryProtocol#getApplicationAttemptReport(GetApplicationAttemptReportRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse -->
+  <class name="GetApplicationAttemptReportResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationAttemptReportResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ApplicationAttemptReport" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"/>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationAttemptReport</code> for the application attempt.
+ 
+ @return <code>ApplicationAttemptReport</code> for the application attempt]]>
+      </doc>
+    </method>
+    <method name="setApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptReport" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationAttemptReport</code> for the application attempt.
+ 
+ @param applicationAttemptReport
+          <code>ApplicationAttemptReport</code> for the application attempt]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>ResourceManager</code> to a client requesting
+ an application attempt report.
+ </p>
+ 
+ <p>
+ The response includes an {@link ApplicationAttemptReport} which has the
+ details about the particular application attempt
+ </p>
+ 
+ @see ApplicationAttemptReport
+ @see ApplicationHistoryProtocol#getApplicationAttemptReport(GetApplicationAttemptReportRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest -->
+  <class name="GetApplicationAttemptsRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationAttemptsRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+    </method>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of an application
+ 
+ @return <code>ApplicationId</code> of an application]]>
+      </doc>
+    </method>
+    <method name="setApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationId</code> of an application
+ 
+ @param applicationId
+          <code>ApplicationId</code> of an application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request from clients to get a list of application attempt reports of an
+ application from the <code>ResourceManager</code>.
+ </p>
+ 
+ @see ApplicationHistoryProtocol#getApplicationAttempts(GetApplicationAttemptsRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse -->
+  <class name="GetApplicationAttemptsResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationAttemptsResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttempts" type="java.util.List"/>
+    </method>
+    <method name="getApplicationAttemptList" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of <code>ApplicationAttemptReport</code> of an application.
+ 
+ @return a list of <code>ApplicationAttemptReport</code> of an application]]>
+      </doc>
+    </method>
+    <method name="setApplicationAttemptList"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttempts" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set a list of <code>ApplicationAttemptReport</code> of an application.
+ 
+ @param applicationAttempts
+          a list of <code>ApplicationAttemptReport</code> of an application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>ResourceManager</code> to a client requesting
+ a list of {@link ApplicationAttemptReport} for application attempts.
+ </p>
+ 
+ <p>
+ The <code>ApplicationAttemptReport</code> for each application includes the
+ details of an application attempt.
+ </p>
+ 
+ @see ApplicationAttemptReport
+ @see ApplicationHistoryProtocol#getApplicationAttempts(GetApplicationAttemptsRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest -->
+  <class name="GetApplicationReportRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationReportRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+    </method>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of the application.
+ @return <code>ApplicationId</code> of the application]]>
+      </doc>
+    </method>
+    <method name="setApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationId</code> of the application
+ @param applicationId <code>ApplicationId</code> of the application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by a client to the <code>ResourceManager</code> to 
+ get an {@link ApplicationReport} for an application.</p>
+ 
+ <p>The request should include the {@link ApplicationId} of the 
+ application.</p>
+ 
+ @see ApplicationClientProtocol#getApplicationReport(GetApplicationReportRequest)
+ @see ApplicationReport]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse -->
+  <class name="GetApplicationReportResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationReportResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationReport</code> for the application.
+ @return <code>ApplicationReport</code> for the application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The response sent by the <code>ResourceManager</code> to a client
+ requesting an application report.</p>
+ 
+ <p>The response includes an {@link ApplicationReport} which has details such 
+ as user, queue, name, host on which the <code>ApplicationMaster</code> is 
+ running, RPC port, tracking URL, diagnostics, start time etc.</p>
+ 
+ @see ApplicationClientProtocol#getApplicationReport(GetApplicationReportRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest -->
+  <class name="GetApplicationsRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationsRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope"/>
+      <param name="users" type="java.util.Set"/>
+      <param name="queues" type="java.util.Set"/>
+      <param name="applicationTypes" type="java.util.Set"/>
+      <param name="applicationTags" type="java.util.Set"/>
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <param name="startRange" type="org.apache.commons.lang3.Range"/>
+      <param name="finishRange" type="org.apache.commons.lang3.Range"/>
+      <param name="limit" type="java.lang.Long"/>
+      <doc>
+      <![CDATA[<p>
+ The request from clients to get a report of Applications matching the
+ given application types in the cluster from the
+ <code>ResourceManager</code>.
+ </p>
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
+
+ <p>Setting any of the parameters to null, would just disable that
+ filter</p>
+
+ @param scope {@link ApplicationsRequestScope} to filter by
+ @param users list of users to filter by
+ @param queues list of scheduler queues to filter by
+ @param applicationTypes types of applications
+ @param applicationTags application tags to filter by
+ @param applicationStates application states to filter by
+ @param startRange range of application start times to filter by
+ @param finishRange range of application finish times to filter by
+ @param limit number of applications to limit to
+ @return {@link GetApplicationsRequest} to be used with
+ {@link ApplicationClientProtocol#getApplications(GetApplicationsRequest)}]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope"/>
+      <doc>
+      <![CDATA[<p>
+ The request from clients to get a report of Applications matching the
+ given application types in the cluster from the
+ <code>ResourceManager</code>.
+ </p>
+
+ @param scope {@link ApplicationsRequestScope} to filter by
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
+ @return a report of Applications in {@link GetApplicationsRequest}]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <doc>
+      <![CDATA[<p>
+ The request from clients to get a report of Applications matching the
+ given application types in the cluster from the
+ <code>ResourceManager</code>.
+ </p>
+
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
+ @return a report of Applications in {@link GetApplicationsRequest}]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <doc>
+      <![CDATA[<p>
+ The request from clients to get a report of Applications matching the
+ given application states in the cluster from the
+ <code>ResourceManager</code>.
+ </p>
+
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
+ @return  a report of Applications in {@link GetApplicationsRequest}]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <doc>
+      <![CDATA[<p>
+ The request from clients to get a report of Applications matching the
+ given application types and application states in the cluster from the
+ <code>ResourceManager</code>.
+ </p>
+
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
+ @return  a report of Applications in <code>GetApplicationsRequest</code>]]>
+      </doc>
+    </method>
+    <method name="getApplicationTypes" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the application types to filter applications on
+
+ @return Set of Application Types to filter on]]>
+      </doc>
+    </method>
+    <method name="getApplicationStates" return="java.util.EnumSet"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the application states to filter applications on
+
+ @return Set of Application states to filter on]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request from clients to get a report of Applications
+ in the cluster from the <code>ResourceManager</code>.</p>
+
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse -->
+  <class name="GetApplicationsResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetApplicationsResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getApplicationList" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <code>ApplicationReport</code> for applications.
+ @return <code>ApplicationReport</code> for applications]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The response sent by the <code>ResourceManager</code> to a client
+ requesting an {@link ApplicationReport} for applications.</p>
+
+ <p>The <code>ApplicationReport</code> for each application includes details
+ such as user, queue, name, host on which the <code>ApplicationMaster</code>
+ is running, RPC port, tracking URL, diagnostics, start time etc.</p>
+
+ @see ApplicationReport
+ @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest -->
+  <class name="GetAttributesToNodesRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetAttributesToNodesRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributes" type="java.util.Set"/>
+    </method>
+    <method name="setNodeAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributes" type="java.util.Set"/>
+      <doc>
+      <![CDATA[Set node attributeKeys for which the mapping of hostname to attribute value
+ is required.
+
+ @param attributes Set of NodeAttributeKey provided.]]>
+      </doc>
+    </method>
+    <method name="getNodeAttributes" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get node attributeKeys for which mapping of hostname to attribute value is
+ required.
+
+ @return Set of NodeAttributeKey]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request from clients to get node to attribute value mapping for all or
+ a given set of NodeAttributeKeys in the cluster from the
+ <code>ResourceManager</code>.
+ </p>
+
+ @see ApplicationClientProtocol#getAttributesToNodes
+      (GetAttributesToNodesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse -->
+  <class name="GetAttributesToNodesResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetAttributesToNodesResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="map" type="java.util.Map"/>
+    </method>
+    <method name="setAttributeToNodes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="map" type="java.util.Map"/>
+    </method>
+    <method name="getAttributesToNodes" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get mapping of NodeAttributeKey to its associated mapping of list of
+ NodeToAttributeValue associated with attribute.
+
+ @return Map of node attributes to list of NodeToAttributeValue.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>ResourceManager</code> to a client requesting
+ node to attribute value mapping for all or a given set of NodeAttributeKeys.
+ </p>
+
+ @see ApplicationClientProtocol#getAttributesToNodes
+      (GetAttributesToNodesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetAttributesToNodesResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest -->
+  <class name="GetClusterMetricsRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetClusterMetricsRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by clients to get cluster metrics from the 
+ <code>ResourceManager</code>.</p>
+ 
+ <p>Currently, this is empty.</p>
+
+ @see ApplicationClientProtocol#getClusterMetrics(GetClusterMetricsRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse -->
+  <class name="GetClusterMetricsResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetClusterMetricsResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getClusterMetrics" return="org.apache.hadoop.yarn.api.records.YarnClusterMetrics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>YarnClusterMetrics</code> for the cluster.
+ @return <code>YarnClusterMetrics</code> for the cluster]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The response sent by the <code>ResourceManager</code> to a client
+ requesting cluster metrics.
+ 
+ @see YarnClusterMetrics
+ @see ApplicationClientProtocol#getClusterMetrics(GetClusterMetricsRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest -->
+  <class name="GetClusterNodeAttributesRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetClusterNodeAttributesRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create new instance of GetClusterNodeAttributesRequest.
+
+ @return GetClusterNodeAttributesRequest is returned.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request from clients to get node attributes in the cluster from the
+ <code>ResourceManager</code>.
+ </p>
+
+ @see ApplicationClientProtocol#getClusterNodeAttributes
+ (GetClusterNodeAttributesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse -->
+  <class name="GetClusterNodeAttributesResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetClusterNodeAttributesResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributes" type="java.util.Set"/>
+      <doc>
+      <![CDATA[Create instance of GetClusterNodeAttributesResponse.
+
+ @param attributes
+ @return GetClusterNodeAttributesResponse.]]>
+      </doc>
+    </method>
+    <method name="setNodeAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributes" type="java.util.Set"/>
+      <doc>
+      <![CDATA[Set node attributes to the response.
+
+ @param attributes Map of Node attributeKey to Type.]]>
+      </doc>
+    </method>
+    <method name="getNodeAttributes" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get node attributes from the response.
+
+ @return Node attributes.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>ResourceManager</code> to a client requesting
+ a node attributes in cluster.
+ </p>
+
+ @see ApplicationClientProtocol#getClusterNodeAttributes
+ (GetClusterNodeAttributesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeAttributesResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest -->
+  <class name="GetClusterNodeLabelsRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetClusterNodeLabelsRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse -->
+  <class name="GetClusterNodeLabelsResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetClusterNodeLabelsResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #newInstance(List)} instead.">
+      <param name="labels" type="java.util.Set"/>
+      <doc>
+      <![CDATA[Creates a new instance.
+
+ @param labels Node labels
+ @return response
+ @deprecated Use {@link #newInstance(List)} instead.]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.List"/>
+    </method>
+    <method name="setNodeLabelList"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.List"/>
+    </method>
+    <method name="getNodeLabelList" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setNodeLabels"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #setNodeLabelList(List)} instead.">
+      <param name="labels" type="java.util.Set"/>
+      <doc>
+      <![CDATA[Set node labels to the response.
+
+ @param labels Node labels
+ @deprecated Use {@link #setNodeLabelList(List)} instead.]]>
+      </doc>
+    </method>
+    <method name="getNodeLabels" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="Use {@link #getNodeLabelList()} instead.">
+      <doc>
+      <![CDATA[Get node labels of the response.
+
+ @return Node labels
+ @deprecated Use {@link #getNodeLabelList()} instead.]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest -->
+  <class name="GetClusterNodesRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetClusterNodesRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="states" type="java.util.EnumSet"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodeStates" return="java.util.EnumSet"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The state to filter the cluster nodes with.
+ @return the set of {@link NodeState}]]>
+      </doc>
+    </method>
+    <method name="setNodeStates"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="states" type="java.util.EnumSet"/>
+      <doc>
+      <![CDATA[The state to filter the cluster nodes with.
+ @param states the set of {@link NodeState}]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request from clients to get a report of all nodes
+ in the cluster from the <code>ResourceManager</code>.</p>
+
+ The request will ask for all nodes in the given {@link NodeState}s.
+
+ @see ApplicationClientProtocol#getClusterNodes(GetClusterNodesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse -->
+  <class name="GetClusterNodesResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetClusterNodesResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNodeReports" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <code>NodeReport</code> for all nodes in the cluster.
+ @return <code>NodeReport</code> for all nodes in the cluster]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The response sent by the <code>ResourceManager</code> to a client
+ requesting a {@link NodeReport} for all nodes.</p>
+ 
+ <p>The <code>NodeReport</code> contains per-node information such as 
+ available resources, number of containers, tracking url, rack name, health
+ status etc.
+ 
+ @see NodeReport
+ @see ApplicationClientProtocol#getClusterNodes(GetClusterNodesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest -->
+  <class name="GetContainerReportRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetContainerReportRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+    </method>
+    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerId</code> of the Container.
+ 
+ @return <code>ContainerId</code> of the Container]]>
+      </doc>
+    </method>
+    <method name="setContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[Set the <code>ContainerId</code> of the container
+ 
+ @param containerId
+          <code>ContainerId</code> of the container]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request sent by a client to the <code>ResourceManager</code> to get an
+ {@link ContainerReport} for a container.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse -->
+  <class name="GetContainerReportResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetContainerReportResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerReport" type="org.apache.hadoop.yarn.api.records.ContainerReport"/>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerReport</code> for the container.
+ 
+ @return <code>ContainerReport</code> for the container]]>
+      </doc>
+    </method>
+    <method name="setContainerReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerReport" type="org.apache.hadoop.yarn.api.records.ContainerReport"/>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>ResourceManager</code> to a client requesting
+ a container report.
+ </p>
+ 
+ <p>
+ The response includes a {@link ContainerReport} which has details of a
+ container.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetContainerReportResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest -->
+  <class name="GetContainersRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetContainersRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+    </method>
+    <method name="getApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationAttemptId</code> of an application attempt.
+ 
+ @return <code>ApplicationAttemptId</code> of an application attempt]]>
+      </doc>
+    </method>
+    <method name="setApplicationAttemptId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationAttemptId</code> of an application attempt
+ 
+ @param applicationAttemptId
+          <code>ApplicationAttemptId</code> of an application attempt]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request from clients to get a list of container reports, which belong to
+ an application attempt from the <code>ResourceManager</code>.
+ </p>
+ 
+ @see ApplicationHistoryProtocol#getContainers(GetContainersRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetContainersRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse -->
+  <class name="GetContainersResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetContainersResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containers" type="java.util.List"/>
+    </method>
+    <method name="getContainerList" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of <code>ContainerReport</code> for all the containers of an
+ application attempt.
+ 
+ @return a list of <code>ContainerReport</code> for all the containers of an
+         application attempt]]>
+      </doc>
+    </method>
+    <method name="setContainerList"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containers" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set a list of <code>ContainerReport</code> for all the containers of an
+ application attempt.
+ 
+ @param containers
+          a list of <code>ContainerReport</code> for all the containers of
+          an application attempt]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>ResourceManager</code> to a client requesting
+ a list of {@link ContainerReport} for containers.
+ </p>
+ 
+ <p>
+ The <code>ContainerReport</code> for each container includes the container
+ details.
+ </p>
+ 
+ @see ContainerReport
+ @see ApplicationHistoryProtocol#getContainers(GetContainersRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetContainersResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest -->
+  <class name="GetContainerStatusesRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetContainerStatusesRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerIds" type="java.util.List"/>
+    </method>
+    <method name="getContainerIds" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <code>ContainerId</code>s of containers for which to obtain
+ the <code>ContainerStatus</code>.
+ 
+ @return the list of <code>ContainerId</code>s of containers for which to
+         obtain the <code>ContainerStatus</code>.]]>
+      </doc>
+    </method>
+    <method name="setContainerIds"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerIds" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set a list of <code>ContainerId</code>s of containers for which to obtain
+ the <code>ContainerStatus</code>
+ 
+ @param containerIds
+          a list of <code>ContainerId</code>s of containers for which to
+          obtain the <code>ContainerStatus</code>]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The request sent by the <code>ApplicationMaster</code> to the
+ <code>NodeManager</code> to get {@link ContainerStatus} of requested
+ containers.
+ 
+ @see ContainerManagementProtocol#getContainerStatuses(GetContainerStatusesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse -->
+  <class name="GetContainerStatusesResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetContainerStatusesResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getContainerStatuses" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerStatus</code>es of the requested containers.
+ 
+ @return <code>ContainerStatus</code>es of the requested containers.]]>
+      </doc>
+    </method>
+    <method name="getFailedRequests" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the containerId-to-exception map in which the exception indicates error
+ from per container for failed requests
+ @return map of containerId-to-exception]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The response sent by the <code>NodeManager</code> to the
+ <code>ApplicationMaster</code> when asked to obtain the
+ <code>ContainerStatus</code> of requested containers.
+ 
+ @see ContainerManagementProtocol#getContainerStatuses(GetContainerStatusesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest -->
+  <class name="GetDelegationTokenRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetDelegationTokenRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="java.lang.String"/>
+    </method>
+    <method name="getRenewer" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setRenewer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[The request issued by the client to get a delegation token from
+ the {@code ResourceManager}.
+ for more information.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse -->
+  <class name="GetDelegationTokenResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetDelegationTokenResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRMDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The Delegation tokens have a identifier which maps to
+ {@link AbstractDelegationTokenIdentifier}.
+ @return the delegation tokens]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Response to a {@link GetDelegationTokenRequest} request 
+ from the client. The response contains the token that 
+ can be used by the containers to talk to  ClientRMService.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesRequest -->
+  <class name="GetLocalizationStatusesRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetLocalizationStatusesRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerIds" type="java.util.List"/>
+    </method>
+    <method name="getContainerIds" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of container IDs of the containers for which the localization
+ statuses are needed.
+
+ @return the list of container IDs.]]>
+      </doc>
+    </method>
+    <method name="setContainerIds"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerIds" type="java.util.List"/>
+      <doc>
+      <![CDATA[Sets the list of container IDs of containers for which the localization
+ statuses are needed.
+ @param containerIds the list of container IDs.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The request sent by an application master to the node manager to get
+ {@link LocalizationStatus}es of containers.
+
+ @see ContainerManagementProtocol#getLocalizationStatuses(
+        GetLocalizationStatusesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesResponse -->
+  <class name="GetLocalizationStatusesResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetLocalizationStatusesResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="statuses" type="java.util.Map"/>
+      <param name="failedRequests" type="java.util.Map"/>
+    </method>
+    <method name="getLocalizationStatuses" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get all the container localization statuses.
+
+ @return container localization statuses.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The response sent by the node manager to an application master when
+ localization statuses are requested.
+
+ @see ContainerManagementProtocol#getLocalizationStatuses(
+        GetLocalizationStatusesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetLocalizationStatusesResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest -->
+  <class name="GetNewApplicationRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetNewApplicationRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by clients to get a new {@link ApplicationId} for
+ submitting an application.</p>
+ 
+ <p>Currently, this is empty.</p>
+ 
+ @see ApplicationClientProtocol#getNewApplication(GetNewApplicationRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse -->
+  <class name="GetNewApplicationResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetNewApplicationResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>new</em> <code>ApplicationId</code> allocated by the 
+ <code>ResourceManager</code>.
+ @return <em>new</em> <code>ApplicationId</code> allocated by the 
+          <code>ResourceManager</code>]]>
+      </doc>
+    </method>
+    <method name="getMaximumResourceCapability" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the maximum capability for any {@link Resource} allocated by the 
+ <code>ResourceManager</code> in the cluster.
+ @return maximum capability of allocated resources in the cluster]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The response sent by the <code>ResourceManager</code> to the client for 
+ a request to get a new {@link ApplicationId} for submitting applications.</p>
+ 
+ <p>Clients can submit an application with the returned
+ {@link ApplicationId}.</p>
+
+ @see ApplicationClientProtocol#getNewApplication(GetNewApplicationRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest -->
+  <class name="GetNewReservationRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetNewReservationRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by clients to get a new {@code ReservationId} for
+ submitting a reservation.</p>
+
+ {@code ApplicationClientProtocol#getNewReservation(GetNewReservationRequest)}]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse -->
+  <class name="GetNewReservationResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetNewReservationResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getReservationId" return="org.apache.hadoop.yarn.api.records.ReservationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a new {@link ReservationId} to be used to submit a reservation.
+
+ @return a {@link ReservationId} representing the unique id to identify
+ a reservation with which it was submitted.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The response sent by the <code>ResourceManager</code> to the client for
+ a request to get a new {@link ReservationId} for submitting reservations.</p>
+
+ <p>Clients can submit a reservation with the returned
+ {@link ReservationId}.</p>
+
+ {@code ApplicationClientProtocol#getNewReservation(GetNewReservationRequest)}]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest -->
+  <class name="GetNodesToAttributesRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetNodesToAttributesRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hostNames" type="java.util.Set"/>
+    </method>
+    <method name="setHostNames"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hostnames" type="java.util.Set"/>
+      <doc>
+      <![CDATA[Set hostnames for which mapping is required.
+
+ @param hostnames hostnames for which the mapping is required.]]>
+      </doc>
+    </method>
+    <method name="getHostNames" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get hostnames for which mapping is required.
+
+ @return Set of hostnames.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request from clients to get nodes to attributes mapping
+ in the cluster from the <code>ResourceManager</code>.
+ </p>
+
+ @see ApplicationClientProtocol#getNodesToAttributes
+ (GetNodesToAttributesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse -->
+  <class name="GetNodesToAttributesResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetNodesToAttributesResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="map" type="java.util.Map"/>
+    </method>
+    <method name="setNodeToAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="map" type="java.util.Map"/>
+    </method>
+    <method name="getNodeToAttributes" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get hostnames to NodeAttributes mapping.
+
+ @return Map of host to attributes.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>ResourceManager</code> to a client requesting
+ nodes to attributes mapping.
+ </p>
+
+ @see ApplicationClientProtocol#getNodesToAttributes
+ (GetNodesToAttributesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetNodesToAttributesResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest -->
+  <class name="GetQueueInfoRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetQueueInfoRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <param name="includeApplications" type="boolean"/>
+      <param name="includeChildQueues" type="boolean"/>
+      <param name="recursive" type="boolean"/>
+    </method>
+    <method name="getQueueName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>queue name</em> for which to get queue information.
+ @return <em>queue name</em> for which to get queue information]]>
+      </doc>
+    </method>
+    <method name="setQueueName"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <em>queue name</em> for which to get queue information
+ @param queueName <em>queue name</em> for which to get queue information]]>
+      </doc>
+    </method>
+    <method name="getIncludeApplications" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is information about <em>active applications</em> required?
+ @return <code>true</code> if applications' information is to be included,
+         else <code>false</code>]]>
+      </doc>
+    </method>
+    <method name="setIncludeApplications"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="includeApplications" type="boolean"/>
+      <doc>
+      <![CDATA[Should we fetch information about <em>active applications</em>?
+ @param includeApplications fetch information about <em>active 
+                            applications</em>?]]>
+      </doc>
+    </method>
+    <method name="getIncludeChildQueues" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is information about <em>child queues</em> required?
+ @return <code>true</code> if information about child queues is required,
+         else <code>false</code>]]>
+      </doc>
+    </method>
+    <method name="setIncludeChildQueues"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="includeChildQueues" type="boolean"/>
+      <doc>
+      <![CDATA[Should we fetch information about <em>child queues</em>?
+ @param includeChildQueues fetch information about <em>child queues</em>?]]>
+      </doc>
+    </method>
+    <method name="getRecursive" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is information on the entire <em>child queue hierarchy</em> required?
+ @return <code>true</code> if information about entire hierarchy is 
+         required, <code>false</code> otherwise]]>
+      </doc>
+    </method>
+    <method name="setRecursive"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="recursive" type="boolean"/>
+      <doc>
+      <![CDATA[Should we fetch information on the entire <em>child queue hierarchy</em>?
+ @param recursive fetch information on the entire <em>child queue 
+                  hierarchy</em>?]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by clients to get <em>queue information</em>
+ from the <code>ResourceManager</code>.</p>
+
+ @see ApplicationClientProtocol#getQueueInfo(GetQueueInfoRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse -->
+  <class name="GetQueueInfoResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetQueueInfoResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getQueueInfo" return="org.apache.hadoop.yarn.api.records.QueueInfo"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>QueueInfo</code> for the specified queue.
+ @return <code>QueueInfo</code> for the specified queue]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The response sent by the {@code ResourceManager} to a client
+ requesting information about queues in the system.
+ <p>
+ The response includes a {@link QueueInfo} which has details such as
+ queue name, used/total capacities, running applications, child queues etc.
+ 
+ @see QueueInfo
+ @see ApplicationClientProtocol#getQueueInfo(GetQueueInfoRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest -->
+  <class name="GetQueueUserAclsInfoRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetQueueUserAclsInfoRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by clients to the <code>ResourceManager</code> to 
+ get queue acls for the <em>current user</em>.</p>
+
+ <p>Currently, this is empty.</p>
+ 
+ @see ApplicationClientProtocol#getQueueUserAcls(GetQueueUserAclsInfoRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse -->
+  <class name="GetQueueUserAclsInfoResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetQueueUserAclsInfoResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getUserAclsInfoList" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>QueueUserACLInfo</code> per queue for the user.
+ @return <code>QueueUserACLInfo</code> per queue for the user]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The response sent by the <code>ResourceManager</code> to clients
+ seeking queue acls for the user.</p>
+
+ <p>The response contains a list of {@link QueueUserACLInfo} which
+ provides information about {@link QueueACL} per queue.</p>
+ 
+ @see QueueACL
+ @see QueueUserACLInfo
+ @see ApplicationClientProtocol#getQueueUserAcls(GetQueueUserAclsInfoRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest -->
+  <class name="GetResourceProfileRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetResourceProfileRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="profile" type="java.lang.String"/>
+    </method>
+    <method name="setProfileName"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="profileName" type="java.lang.String"/>
+    </method>
+    <method name="getProfileName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Request class for getting the details for a particular resource profile.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse -->
+  <class name="GetResourceProfileResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="GetResourceProfileResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getResource" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the resources that will be allocated if the profile was used.
+
+ @return the resources that will be allocated if the profile was used.]]>
+      </doc>
+    </method>
+    <method name="setResource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="r" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Set the resources that will be allocated if the profile is used.
+
+ @param r Set the resources that will be allocated if the profile is used.]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Response class for getting the details for a particular resource profile.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.GetResourceProfileResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest -->
+  <class name="IncreaseContainersResourceRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="IncreaseContainersResourceRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containersToIncrease" type="java.util.List"/>
+    </method>
+    <method name="getContainersToIncrease" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of container tokens to be used for authorization during
+ container resource increase.
+ <p>
+ Note: {@link NMToken} will be used for authenticating communication with
+ {@code NodeManager}.
+ @return the list of container tokens to be used for authorization during
+ container resource increase.
+ @see NMToken]]>
+      </doc>
+    </method>
+    <method name="setContainersToIncrease"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containersToIncrease" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set container tokens to be used during container resource increase.
+ The token is acquired from
+ <code>AllocateResponse.getIncreasedContainers</code>.
+ The token contains the container id and resource capability required for
+ container resource increase.
+ @param containersToIncrease the list of container tokens to be used
+                             for container resource increase.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by <code>Application Master</code> to the
+ <code>Node Manager</code> to change the resource quota of a container.</p>
+
+ @see ContainerManagementProtocol#increaseContainersResource(IncreaseContainersResourceRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse -->
+  <class name="IncreaseContainersResourceResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="IncreaseContainersResourceResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getSuccessfullyIncreasedContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of containerIds of containers whose resource
+ have been successfully increased.
+
+ @return the list of containerIds of containers whose resource have
+ been successfully increased.]]>
+      </doc>
+    </method>
+    <method name="getFailedRequests" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the containerId-to-exception map in which the exception indicates
+ error from each container for failed requests.
+ @return map of containerId-to-exception]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>NodeManager</code> to the
+ <code>ApplicationMaster</code> when asked to increase container resource.
+ </p>
+
+ @see ContainerManagementProtocol#increaseContainersResource(IncreaseContainersResourceRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.IncreaseContainersResourceResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest -->
+  <class name="KillApplicationRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="KillApplicationRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+    </method>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of the application to be aborted.
+ @return <code>ApplicationId</code> of the application to be aborted]]>
+      </doc>
+    </method>
+    <method name="setApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+    </method>
+    <method name="getDiagnostics" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>diagnostics</em> due to which the application is being killed.
+ @return <em>diagnostics</em> due to which the application is being killed]]>
+      </doc>
+    </method>
+    <method name="setDiagnostics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="diagnostics" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <em>diagnostics</em> due to which the application is being killed.
+ @param diagnostics <em>diagnostics</em> due to which the application is being
+          killed]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by the client to the <code>ResourceManager</code>
+ to abort a submitted application.</p>
+ 
+ <p>The request includes the {@link ApplicationId} of the application to be
+ aborted.</p>
+ 
+ @see ApplicationClientProtocol#forceKillApplication(KillApplicationRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse -->
+  <class name="KillApplicationResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="KillApplicationResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getIsKillCompleted" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the flag which indicates that the process of killing application is completed or not.
+ @return true if the process of killing application has completed,
+         false otherwise]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The response sent by the <code>ResourceManager</code> to the client aborting
+ a submitted application.
+ <p>
+ The response, includes:
+ <ul>
+   <li>
+     A flag which indicates that the process of killing the application is
+     completed or not.
+   </li>
+ </ul>
+ Note: user is recommended to wait until this flag becomes true, otherwise if
+ the <code>ResourceManager</code> crashes before the process of killing the
+ application is completed, the <code>ResourceManager</code> may retry this
+ application on recovery.
+ 
+ @see ApplicationClientProtocol#forceKillApplication(KillApplicationRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest -->
+  <class name="MoveApplicationAcrossQueuesRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MoveApplicationAcrossQueuesRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="queue" type="java.lang.String"/>
+    </method>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of the application to be moved.
+ @return <code>ApplicationId</code> of the application to be moved]]>
+      </doc>
+    </method>
+    <method name="setApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationId</code> of the application to be moved.
+ @param appId <code>ApplicationId</code> of the application to be moved]]>
+      </doc>
+    </method>
+    <method name="getTargetQueue" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the queue to place the application in.
+ @return the name of the queue to place the application in]]>
+      </doc>
+    </method>
+    <method name="setTargetQueue"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the queue to place the application in.
+ @param queue the name of the queue to place the application in]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by the client to the <code>ResourceManager</code>
+ to move a submitted application to a different queue.</p>
+ 
+ <p>The request includes the {@link ApplicationId} of the application to be
+ moved and the queue to place it in.</p>
+ 
+ @see ApplicationClientProtocol#moveApplicationAcrossQueues(MoveApplicationAcrossQueuesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse -->
+  <class name="MoveApplicationAcrossQueuesResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="MoveApplicationAcrossQueuesResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>ResourceManager</code> to the client moving
+ a submitted application to a different queue.
+ </p>
+ <p>
+ A response without exception means that the move has completed successfully.
+ </p>
+ 
+ @see ApplicationClientProtocol#moveApplicationAcrossQueues(MoveApplicationAcrossQueuesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest -->
+  <class name="RegisterApplicationMasterRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RegisterApplicationMasterRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="host" type="java.lang.String"/>
+      <param name="port" type="int"/>
+      <param name="trackingUrl" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create a new instance of <code>RegisterApplicationMasterRequest</code>.
+ If <em>port, trackingUrl</em> is not used, use the following default value:
+ <ul>
+  <li>port: -1</li>
+  <li>trackingUrl: null</li>
+ </ul>
+ The port is allowed to be any integer larger than or equal to -1.
+ @return the new instance of <code>RegisterApplicationMasterRequest</code>]]>
+      </doc>
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>host</em> on which the <code>ApplicationMaster</code> is 
+ running.
+ @return <em>host</em> on which the <code>ApplicationMaster</code> is running]]>
+      </doc>
+    </method>
+    <method name="setHost"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="host" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <em>host</em> on which the <code>ApplicationMaster</code> is 
+ running.
+ @param host <em>host</em> on which the <code>ApplicationMaster</code> 
+             is running]]>
+      </doc>
+    </method>
+    <method name="getRpcPort" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>RPC port</em> on which the {@code ApplicationMaster} is
+ responding.
+ @return the <em>RPC port</em> on which the {@code ApplicationMaster}
+         is responding]]>
+      </doc>
+    </method>
+    <method name="setRpcPort"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="port" type="int"/>
+      <doc>
+      <![CDATA[Set the <em>RPC port</em> on which the {@code ApplicationMaster} is
+ responding.
+ @param port <em>RPC port</em> on which the {@code ApplicationMaster}
+             is responding]]>
+      </doc>
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>tracking URL</em> for the <code>ApplicationMaster</code>.
+ If this URL contains a scheme then it will be used by the resource manager
+ web application proxy; otherwise the scheme will default to http.
+ @return <em>tracking URL</em> for the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="setTrackingUrl"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="trackingUrl" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <em>tracking URL</em> for the <code>ApplicationMaster</code> while
+ it is running. This is the web-URL to which ResourceManager or
+ web-application proxy will redirect client/users while the application and
+ the <code>ApplicationMaster</code> are still running.
+ <p>
+ If the passed url has a scheme then that will be used by the
+ ResourceManager and web-application proxy, otherwise the scheme will
+ default to http.
+ </p>
+ <p>
+ Empty, null and "N/A" strings are all valid besides a real URL. In case a URL
+ isn't explicitly passed, it defaults to "N/A" on the ResourceManager.
+ </p>
+
+ @param trackingUrl
+          <em>tracking URL</em> for the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="getPlacementConstraints" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return all Placement Constraints specified at the Application level. The
+ mapping is from a set of allocation tags to a
+ <code>PlacementConstraint</code> associated with the tags, i.e., each
+ {@link org.apache.hadoop.yarn.api.records.SchedulingRequest} that has those
+ tags will be placed taking into account the corresponding constraint.
+
+ @return A map of Placement Constraints.]]>
+      </doc>
+    </method>
+    <method name="setPlacementConstraints"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="placementConstraints" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set Placement Constraints applicable to the
+ {@link org.apache.hadoop.yarn.api.records.SchedulingRequest}s
+ of this application.
+ The mapping is from a set of allocation tags to a
+ <code>PlacementConstraint</code> associated with the tags.
+ For example:
+  Map &lt;
+   &lt;hb_regionserver&gt; -&gt; node_anti_affinity,
+   &lt;hb_regionserver, hb_master&gt; -&gt; rack_affinity,
+   ...
+  &gt;
+ @param placementConstraints Placement Constraint Mapping.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The request sent by the {@code ApplicationMaster} to {@code ResourceManager}
+ on registration.
+ <p>
+ The registration includes details such as:
+ <ul>
+   <li>Hostname on which the AM is running.</li>
+   <li>RPC Port</li>
+   <li>Tracking URL</li>
+ </ul>
+ 
+ @see ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse -->
+  <class name="RegisterApplicationMasterResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RegisterApplicationMasterResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getMaximumResourceCapability" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the maximum capability for any {@link Resource} allocated by the 
+ <code>ResourceManager</code> in the cluster.
+ @return maximum capability of allocated resources in the cluster]]>
+      </doc>
+    </method>
+    <method name="getApplicationACLs" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationACL</code>s for the application. 
+ @return all the <code>ApplicationACL</code>s]]>
+      </doc>
+    </method>
+    <method name="getClientToAMTokenMasterKey" return="java.nio.ByteBuffer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[<p>Get ClientToAMToken master key.</p>
+ <p>The ClientToAMToken master key is sent to <code>ApplicationMaster</code>
+ by <code>ResourceManager</code> via {@link RegisterApplicationMasterResponse}
+ , used to verify corresponding ClientToAMToken.</p>
+ @return ClientToAMToken master key]]>
+      </doc>
+    </method>
+    <method name="setClientToAMTokenMasterKey"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.nio.ByteBuffer"/>
+      <doc>
+      <![CDATA[Set ClientToAMToken master key.]]>
+      </doc>
+    </method>
+    <method name="getQueue" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[<p>Get the queue that the application was placed in.</p>
+ @return the queue that the application was placed in.]]>
+      </doc>
+    </method>
+    <method name="setQueue"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[<p>Set the queue that the application was placed in.</p>]]>
+      </doc>
+    </method>
+    <method name="getContainersFromPreviousAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[<p>
+ Get the list of running containers as viewed by
+ <code>ResourceManager</code> from previous application attempts.
+ </p>
+ 
+ @return the list of running containers as viewed by
+         <code>ResourceManager</code> from previous application attempts
+ @see RegisterApplicationMasterResponse#getNMTokensFromPreviousAttempts()]]>
+      </doc>
+    </method>
+    <method name="getNMTokensFromPreviousAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of NMTokens for communicating with the NMs where the
+ containers of previous application attempts are running.
+ 
+ @return the list of NMTokens for communicating with the NMs where the
+         containers of previous application attempts are running.
+ 
+ @see RegisterApplicationMasterResponse#getContainersFromPreviousAttempts()]]>
+      </doc>
+    </method>
+    <method name="getSchedulerResourceTypes" return="java.util.EnumSet"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a set of the resource types considered by the scheduler.
+
+ @return a set of the resource types considered by the scheduler]]>
+      </doc>
+    </method>
+    <method name="getResourceProfiles" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get list of supported resource profiles from RM.
+
+ @return a map of resource profiles and its capabilities.]]>
+      </doc>
+    </method>
+    <method name="getResourceTypes" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get available resource types supported by RM.
+
+ @return a list of the available resource types supported by RM]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The response sent by the {@code ResourceManager} to a new
+ {@code ApplicationMaster} on registration.
+ <p>
+ The response contains critical details such as:
+ <ul>
+   <li>Maximum capability for allocated resources in the cluster.</li>
+   <li>{@code ApplicationACL}s for the application.</li>
+   <li>ClientToAMToken master key.</li>
+ </ul>
+ 
+ @see ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerRequest -->
+  <class name="ReInitializeContainerRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReInitializeContainerRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="containerLaunchContext" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="autoCommit" type="boolean"/>
+      <doc>
+      <![CDATA[Creates a new instance of the ReInitializeContainerRequest.
+ @param containerId Container Id.
+ @param containerLaunchContext Container Launch Context.
+ @param autoCommit AutoCommit.
+ @return ReInitializeContainerRequest.]]>
+      </doc>
+    </method>
+    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerId</code> of the container to re-initialize.
+
+ @return <code>ContainerId</code> of the container to re-initialize.]]>
+      </doc>
+    </method>
+    <method name="getContainerLaunchContext" return="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerLaunchContext</code> to re-initialize the container
+ with.
+
+ @return <code>ContainerLaunchContext</code> to re-initialize the
+ container with.]]>
+      </doc>
+    </method>
+    <method name="getAutoCommit" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Check if AutoCommit is set for this ReInitialization.
+ @return If AutoCommit is set for this ReInitialization.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This encapsulates all the required fields needed for a Container
+ ReInitialization.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerResponse -->
+  <class name="ReInitializeContainerResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReInitializeContainerResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[The response to the {@link ReInitializeContainerRequest}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ReInitializeContainerResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest -->
+  <class name="ReleaseSharedCacheResourceRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReleaseSharedCacheResourceRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAppId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of the resource to be released.
+
+ @return <code>ApplicationId</code>]]>
+      </doc>
+    </method>
+    <method name="setAppId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="id" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationId</code> of the resource to be released.
+
+ @param id <code>ApplicationId</code>]]>
+      </doc>
+    </method>
+    <method name="getResourceKey" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>key</code> of the resource to be released.
+
+ @return <code>key</code>]]>
+      </doc>
+    </method>
+    <method name="setResourceKey"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <code>key</code> of the resource to be released.
+
+ @param key unique identifier for the resource]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request from clients to release a resource in the shared cache.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceResponse -->
+  <class name="ReleaseSharedCacheResourceResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReleaseSharedCacheResourceResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[<p>
+ The response to clients from the <code>SharedCacheManager</code> when
+ releasing a resource in the shared cache.
+ </p>
+
+ <p>
+ Currently, this is empty.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ReleaseSharedCacheResourceResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest -->
+  <class name="ReservationDeleteRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReservationDeleteRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationId" type="org.apache.hadoop.yarn.api.records.ReservationId"/>
+    </method>
+    <method name="getReservationId" return="org.apache.hadoop.yarn.api.records.ReservationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link ReservationId}, that corresponds to a valid resource
+ allocation in the scheduler (between start and end time of this
+ reservation)
+ 
+ @return the {@link ReservationId} representing the unique id of the
+         corresponding reserved resource allocation in the scheduler]]>
+      </doc>
+    </method>
+    <method name="setReservationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationId" type="org.apache.hadoop.yarn.api.records.ReservationId"/>
+      <doc>
+      <![CDATA[Set the {@link ReservationId}, that correspond to a valid resource
+ allocation in the scheduler (between start and end time of this
+ reservation)
+ 
+ @param reservationId the {@link ReservationId} representing the unique
+          id of the corresponding reserved resource allocation in the
+          scheduler]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@link ReservationDeleteRequest} captures the set of requirements the user
+ has to delete an existing reservation.
+ 
+ @see ReservationDefinition]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse -->
+  <class name="ReservationDeleteResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReservationDeleteResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[{@link ReservationDeleteResponse} contains the answer of the admission
+ control system in the {@code ResourceManager} to a reservation delete
+ operation. Currently response is empty if the operation was successful, if
+ not an exception reporting reason for a failure.
+ 
+ @see ReservationDefinition]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest -->
+  <class name="ReservationListRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReservationListRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queue" type="java.lang.String"/>
+      <param name="reservationId" type="java.lang.String"/>
+      <param name="startTime" type="long"/>
+      <param name="endTime" type="long"/>
+      <param name="includeReservationAllocations" type="boolean"/>
+      <doc>
+      <![CDATA[The {@link ReservationListRequest} will use the reservationId to search for
+ reservations to list if it is provided. Otherwise, it will select active
+ reservations within the startTime and endTime (inclusive).
+
+ @param queue Required. Cannot be null or empty. Refers to the reservable
+              queue in the scheduler that was selected when creating a
+              reservation submission {@link ReservationSubmissionRequest}.
+ @param reservationId Optional. String representation of
+                     {@code ReservationId} If provided, other fields will
+                     be ignored.
+ @param startTime Optional. If provided, only reservations that
+                end after the startTime will be selected. This defaults
+                to 0 if an invalid number is used.
+ @param endTime Optional. If provided, only reservations that
+                start on or before endTime will be selected. This defaults
+                to Long.MAX_VALUE if an invalid number is used.
+ @param includeReservationAllocations Optional. Flag that
+                determines whether the entire reservation allocations are
+                to be returned. Reservation allocations are subject to
+                change in the event of re-planning as described by
+                {@code ReservationDefinition}.
+ @return the list of reservations via  {@link ReservationListRequest}]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queue" type="java.lang.String"/>
+      <param name="reservationId" type="java.lang.String"/>
+      <param name="includeReservationAllocations" type="boolean"/>
+      <doc>
+      <![CDATA[The {@link ReservationListRequest} will use the reservationId to search for
+ reservations to list if it is provided. Otherwise, it will select active
+ reservations within the startTime and endTime (inclusive).
+
+ @param queue Required. Cannot be null or empty. Refers to the reservable
+              queue in the scheduler that was selected when creating a
+              reservation submission {@link ReservationSubmissionRequest}.
+ @param reservationId Optional. String representation of
+                     {@code ReservationId} If provided, other fields will
+                     be ignored.
+ @param includeReservationAllocations Optional. Flag that
+                determines whether the entire reservation allocations are
+                to be returned. Reservation allocations are subject to
+                change in the event of re-planning as described by
+                {@code ReservationDefinition}.
+ @return the list of reservations via {@link ReservationListRequest}]]>
+      </doc>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queue" type="java.lang.String"/>
+      <param name="reservationId" type="java.lang.String"/>
+      <doc>
+      <![CDATA[The {@link ReservationListRequest} will use the reservationId to search for
+ reservations to list if it is provided. Otherwise, it will select active
+ reservations within the startTime and endTime (inclusive).
+
+ @param queue Required. Cannot be null or empty. Refers to the reservable
+              queue in the scheduler that was selected when creating a
+              reservation submission {@link ReservationSubmissionRequest}.
+ @param reservationId Optional. String representation of
+                     {@code ReservationId} If provided, other fields will
+                     be ignored.
+ @return the list of reservations via {@link ReservationListRequest}]]>
+      </doc>
+    </method>
+    <method name="getQueue" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get queue name to use to find reservations.
+
+ @return the queue name to use to find reservations.]]>
+      </doc>
+    </method>
+    <method name="setQueue"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set queue name to use to find resource allocations.
+
+ @param queue Required. Cannot be null or empty.]]>
+      </doc>
+    </method>
+    <method name="getReservationId" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reservation id to use to find a reservation.
+
+ @return the reservation id of the reservation.]]>
+      </doc>
+    </method>
+    <method name="setReservationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationId" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the reservation id to use to find a reservation.
+
+ @param reservationId Optional. String representation of
+                     {@code ReservationId} If provided, other fields will
+                     be ignored.]]>
+      </doc>
+    </method>
+    <method name="getStartTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the start time to use to search for reservations.
+ When this is set, reservations that start before this start
+ time are ignored.
+
+ @return the start time to use to search for reservations.]]>
+      </doc>
+    </method>
+    <method name="setStartTime"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="startTime" type="long"/>
+      <doc>
+      <![CDATA[Set the start time to use to search for reservations.
+ When this is set, reservations that start before this start
+ time are ignored.
+
+ @param startTime Optional. If provided, only reservations that
+                end after the startTime will be selected. This defaults
+                to 0 if an invalid number is used.]]>
+      </doc>
+    </method>
+    <method name="getEndTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the end time to use to search for reservations.
+ When this is set, reservations that start after this end
+ time are ignored.
+
+ @return the end time to use to search for reservations.]]>
+      </doc>
+    </method>
+    <method name="setEndTime"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="endTime" type="long"/>
+      <doc>
+      <![CDATA[Set the end time to use to search for reservations.
+ When this is set, reservations that start after this end
+ time are ignored.
+
+ @param endTime Optional. If provided, only reservations that
+                start before endTime will be selected. This defaults
+                to Long.MAX_VALUE if an invalid number is used.]]>
+      </doc>
+    </method>
+    <method name="getIncludeResourceAllocations" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the boolean representing whether or not the user
+ is requesting the full resource allocation.
+ If this is true, the full resource allocation will
+ be included in the response.
+
+ @return true if the full resource allocations should be included in the
+ response, false otherwise.]]>
+      </doc>
+    </method>
+    <method name="setIncludeResourceAllocations"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="includeReservationAllocations" type="boolean"/>
+      <doc>
+      <![CDATA[Set the boolean representing whether or not the user
+ is requesting the full resource allocation.
+ If this is true, the full resource allocation will
+ be included in the response.
+
+ @param includeReservationAllocations Optional. Flag that
+                determines whether the entire list of
+                {@code ResourceAllocationRequest} will be returned.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@link ReservationListRequest} captures the set of requirements the
+ user has to list reservations.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse -->
+  <class name="ReservationListResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReservationListResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getReservationAllocationState" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of {@link ReservationAllocationState}, that corresponds
+ to a reservation in the scheduler.
+
+ @return the list of {@link ReservationAllocationState} which holds
+ information of a particular reservation]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@link ReservationListResponse} captures the list of reservations that the
+ user has queried.
+
+ The resulting list of {@link ReservationAllocationState} contains a list of
+ {@code ResourceAllocationRequest} representing the current state of the
+ reservation resource allocations will be returned. This is subject to change
+ in the event of re-planning as described by {@code ReservationDefinition}
+
+ @see ReservationAllocationState]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest -->
+  <class name="ReservationSubmissionRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReservationSubmissionRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationDefinition" type="org.apache.hadoop.yarn.api.records.ReservationDefinition"/>
+      <param name="queueName" type="java.lang.String"/>
+      <param name="reservationId" type="org.apache.hadoop.yarn.api.records.ReservationId"/>
+    </method>
+    <method name="getReservationDefinition" return="org.apache.hadoop.yarn.api.records.ReservationDefinition"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link ReservationDefinition} representing the user constraints for
+ this reservation
+ 
+ @return the reservation definition representing user constraints]]>
+      </doc>
+    </method>
+    <method name="setReservationDefinition"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationDefinition" type="org.apache.hadoop.yarn.api.records.ReservationDefinition"/>
+      <doc>
+      <![CDATA[Set the {@link ReservationDefinition} representing the user constraints for
+ this reservation
+ 
+ @param reservationDefinition the reservation request representing the
+          reservation]]>
+      </doc>
+    </method>
+    <method name="getQueue" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the name of the {@code Plan} that corresponds to the name of the
+ {@link QueueInfo} in the scheduler to which the reservation will be
+ submitted to.
+ 
+ @return the name of the {@code Plan} that corresponds to the name of the
+         {@link QueueInfo} in the scheduler to which the reservation will be
+         submitted to]]>
+      </doc>
+    </method>
+    <method name="setQueue"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the name of the {@code Plan} that corresponds to the name of the
+ {@link QueueInfo} in the scheduler to which the reservation will be
+ submitted to
+ 
+ @param queueName the name of the parent {@code Plan} that corresponds to
+          the name of the {@link QueueInfo} in the scheduler to which the
+          reservation will be submitted to]]>
+      </doc>
+    </method>
+    <method name="getReservationId" return="org.apache.hadoop.yarn.api.records.ReservationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reservation id that corresponds to the reservation submission.
+
+ @return reservation id that will be used to identify the reservation
+ submission.]]>
+      </doc>
+    </method>
+    <method name="setReservationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationId" type="org.apache.hadoop.yarn.api.records.ReservationId"/>
+      <doc>
+      <![CDATA[Set the reservation id that corresponds to the reservation submission.
+
+ @param reservationId reservation id that will be used to identify the
+                      reservation submission.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@link ReservationSubmissionRequest} captures the set of requirements the
+ user has to create a reservation.
+ 
+ @see ReservationDefinition]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse -->
+  <class name="ReservationSubmissionResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReservationSubmissionResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[<p>The response sent by the <code>ResourceManager</code> to a client on
+ reservation submission.</p>
+
+ <p>Currently, this is empty.</p>
+
+ {@code ApplicationClientProtocol#submitReservation(
+ ReservationSubmissionRequest)}]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest -->
+  <class name="ReservationUpdateRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReservationUpdateRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationDefinition" type="org.apache.hadoop.yarn.api.records.ReservationDefinition"/>
+      <param name="reservationId" type="org.apache.hadoop.yarn.api.records.ReservationId"/>
+    </method>
+    <method name="getReservationDefinition" return="org.apache.hadoop.yarn.api.records.ReservationDefinition"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link ReservationDefinition} representing the updated user
+ constraints for this reservation
+ 
+ @return the reservation definition representing user constraints]]>
+      </doc>
+    </method>
+    <method name="setReservationDefinition"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationDefinition" type="org.apache.hadoop.yarn.api.records.ReservationDefinition"/>
+      <doc>
+      <![CDATA[Set the {@link ReservationDefinition} representing the updated user
+ constraints for this reservation
+ 
+ @param reservationDefinition the reservation request representing the
+          reservation]]>
+      </doc>
+    </method>
+    <method name="getReservationId" return="org.apache.hadoop.yarn.api.records.ReservationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link ReservationId}, that corresponds to a valid resource
+ allocation in the scheduler (between start and end time of this
+ reservation)
+ 
+ @return the {@link ReservationId} representing the unique id of the
+         corresponding reserved resource allocation in the scheduler]]>
+      </doc>
+    </method>
+    <method name="setReservationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationId" type="org.apache.hadoop.yarn.api.records.ReservationId"/>
+      <doc>
+      <![CDATA[Set the {@link ReservationId}, that correspond to a valid resource
+ allocation in the scheduler (between start and end time of this
+ reservation)
+ 
+ @param reservationId the {@link ReservationId} representing the unique
+          id of the corresponding reserved resource allocation in the
+          scheduler]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@link ReservationUpdateRequest} captures the set of requirements the user
+ has to update an existing reservation.
+ 
+ @see ReservationDefinition]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse -->
+  <class name="ReservationUpdateResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReservationUpdateResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[{@link ReservationUpdateResponse} contains the answer of the admission
+ control system in the {@code ResourceManager} to a reservation update
+ operation. Currently response is empty if the operation was successful, if
+ not an exception reporting reason for a failure.
+ 
+ @see ReservationDefinition]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationRequest -->
+  <class name="ResourceLocalizationRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ResourceLocalizationRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="localResources" type="java.util.Map"/>
+    </method>
+    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerId</code> of the container to localize resources.
+
+ @return <code>ContainerId</code> of the container to localize resources.]]>
+      </doc>
+    </method>
+    <method name="getLocalResources" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <code>LocalResource</code> required by the container.
+
+ @return all <code>LocalResource</code> required by the container]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The request sent by the ApplicationMaster to ask for localizing resources.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationResponse -->
+  <class name="ResourceLocalizationResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ResourceLocalizationResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[The response to the {@link ResourceLocalizationRequest}]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.ResourceLocalizationResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.RestartContainerResponse -->
+  <class name="RestartContainerResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RestartContainerResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[The response to a restart Container request.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.RestartContainerResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.RollbackResponse -->
+  <class name="RollbackResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RollbackResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Response to a Rollback request.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.RollbackResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest -->
+  <class name="SignalContainerRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SignalContainerRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="signalContainerCommand" type="org.apache.hadoop.yarn.api.records.SignalContainerCommand"/>
+    </method>
+    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerId</code> of the container to signal.
+ @return <code>ContainerId</code> of the container to signal.]]>
+      </doc>
+    </method>
+    <method name="setContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[Set the <code>ContainerId</code> of the container to signal.]]>
+      </doc>
+    </method>
+    <method name="getCommand" return="org.apache.hadoop.yarn.api.records.SignalContainerCommand"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>SignalContainerCommand</code> of the signal request.
+ @return <code>SignalContainerCommand</code> of the signal request.]]>
+      </doc>
+    </method>
+    <method name="setCommand"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="command" type="org.apache.hadoop.yarn.api.records.SignalContainerCommand"/>
+      <doc>
+      <![CDATA[Set the <code>SignalContainerCommand</code> of the signal request.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by the client to the <code>ResourceManager</code>
+ or by the <code>ApplicationMaster</code> to the <code>NodeManager</code>
+ to signal a container.
+ @see SignalContainerCommand </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse -->
+  <class name="SignalContainerResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SignalContainerResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[<p>The response sent by the <code>ResourceManager</code> to the client
+ signalling a container.</p>
+
+ <p>Currently it's empty.</p>
+
+ @see ApplicationClientProtocol#signalToContainer(SignalContainerRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.SignalContainerResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest -->
+  <class name="StartContainerRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StartContainerRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Token"/>
+    </method>
+    <method name="getContainerLaunchContext" return="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerLaunchContext</code> for the container to be started
+ by the <code>NodeManager</code>.
+ 
+ @return <code>ContainerLaunchContext</code> for the container to be started
+         by the <code>NodeManager</code>]]>
+      </doc>
+    </method>
+    <method name="setContainerLaunchContext"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <doc>
+      <![CDATA[Set the <code>ContainerLaunchContext</code> for the container to be started
+ by the <code>NodeManager</code>
+ @param context <code>ContainerLaunchContext</code> for the container to be 
+                started by the <code>NodeManager</code>]]>
+      </doc>
+    </method>
+    <method name="getContainerToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the container token to be used for authorization during starting
+ container.
+ <p>
+ Note: {@link NMToken} will be used for authenticating communication with
+ {@code NodeManager}.
+ @return the container token to be used for authorization during starting
+ container.
+ @see NMToken
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+      </doc>
+    </method>
+    <method name="setContainerToken"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Token"/>
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by the <code>ApplicationMaster</code> to the
+ <code>NodeManager</code> to <em>start</em> a container.</p>
+ 
+ <p>The <code>ApplicationMaster</code> has to provide details such as
+ allocated resource capability, security tokens (if enabled), command
+ to be executed to start the container, environment for the process, 
+ necessary binaries/jar/shared-objects etc. via the 
+ {@link ContainerLaunchContext}.</p>
+
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest -->
+  <class name="StartContainersRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StartContainersRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="requests" type="java.util.List"/>
+    </method>
+    <method name="getStartContainerRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of {@link StartContainerRequest} to start containers.
+ @return a list of {@link StartContainerRequest} to start containers.]]>
+      </doc>
+    </method>
+    <method name="setStartContainerRequests"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set a list of {@link StartContainerRequest} to start containers.
+ @param request a list of {@link StartContainerRequest} to start containers]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request which contains a list of {@link StartContainerRequest} sent by
+ the <code>ApplicationMaster</code> to the <code>NodeManager</code> to
+ <em>start</em> containers.
+ </p>
+ 
+ <p>
+ In each {@link StartContainerRequest}, the <code>ApplicationMaster</code> has
+ to provide details such as allocated resource capability, security tokens (if
+ enabled), command to be executed to start the container, environment for the
+ process, necessary binaries/jar/shared-objects etc. via the
+ {@link ContainerLaunchContext}.
+ </p>
+ 
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse -->
+  <class name="StartContainersResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StartContainersResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getSuccessfullyStartedContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <code>ContainerId</code> s of the containers that are
+ started successfully.
+ 
+ @return the list of <code>ContainerId</code> s of the containers that are
+         started successfully.
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+      </doc>
+    </method>
+    <method name="getFailedRequests" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the containerId-to-exception map in which the exception indicates error
+ from per container for failed requests
+ @return map of containerId-to-exception]]>
+      </doc>
+    </method>
+    <method name="getAllServicesMetaData" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[<p>
+ Get the meta-data from all auxiliary services running on the
+ <code>NodeManager</code>.
+ </p>
+ <p>
+ The meta-data is returned as a Map between the auxiliary service names and
+ their corresponding per service meta-data as an opaque blob
+ <code>ByteBuffer</code>
+ </p>
+ 
+ <p>
+ To be able to interpret the per-service meta-data, you should consult the
+ documentation for the Auxiliary-service configured on the NodeManager
+ </p>
+ 
+ @return a Map between the names of auxiliary services and their
+         corresponding meta-data]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>NodeManager</code> to the
+ <code>ApplicationMaster</code> when asked to <em>start</em> an allocated
+ container.
+ </p>
+ 
+ @see ContainerManagementProtocol#startContainers(StartContainersRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.StartContainersResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest -->
+  <class name="StopContainersRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StopContainersRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerIds" type="java.util.List"/>
+    </method>
+    <method name="getContainerIds" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerId</code>s of the containers to be stopped.
+ @return <code>ContainerId</code>s of containers to be stopped]]>
+      </doc>
+    </method>
+    <method name="setContainerIds"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerIds" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the <code>ContainerId</code>s of the containers to be stopped.
+ @param containerIds <code>ContainerId</code>s of the containers to be stopped]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by the <code>ApplicationMaster</code> to the
+ <code>NodeManager</code> to <em>stop</em> containers.</p>
+ 
+ @see ContainerManagementProtocol#stopContainers(StopContainersRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse -->
+  <class name="StopContainersResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StopContainersResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getSuccessfullyStoppedContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of containerIds of successfully stopped containers.
+ 
+ @return the list of containerIds of successfully stopped containers.]]>
+      </doc>
+    </method>
+    <method name="getFailedRequests" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the containerId-to-exception map in which the exception indicates error
+ from per container for failed requests
+ @return map of containerId-to-exception]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>NodeManager</code> to the
+ <code>ApplicationMaster</code> when asked to <em>stop</em> allocated
+ containers.
+ </p>
+ 
+ @see ContainerManagementProtocol#stopContainers(StopContainersRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest -->
+  <class name="SubmitApplicationRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SubmitApplicationRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"/>
+    </method>
+    <method name="getApplicationSubmissionContext" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationSubmissionContext</code> for the application.
+ @return <code>ApplicationSubmissionContext</code> for the application]]>
+      </doc>
+    </method>
+    <method name="setApplicationSubmissionContext"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationSubmissionContext</code> for the application.
+ @param context <code>ApplicationSubmissionContext</code> for the 
+                application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>The request sent by a client to <em>submit an application</em> to the 
+ <code>ResourceManager</code>.</p>
+ 
+ <p>The request, via {@link ApplicationSubmissionContext}, contains
+ details such as queue, {@link Resource} required to run the 
+ <code>ApplicationMaster</code>, the equivalent of 
+ {@link ContainerLaunchContext} for launching the 
+ <code>ApplicationMaster</code> etc.
+ 
+ @see ApplicationClientProtocol#submitApplication(SubmitApplicationRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse -->
+  <class name="SubmitApplicationResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SubmitApplicationResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[<p>The response sent by the <code>ResourceManager</code> to a client on
+ application submission.</p>
+ 
+ <p>Currently, this is empty.</p>
+ 
+ @see ApplicationClientProtocol#submitApplication(SubmitApplicationRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest -->
+  <class name="UpdateApplicationPriorityRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UpdateApplicationPriorityRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+    </method>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of the application.
+ 
+ @return <code>ApplicationId</code> of the application]]>
+      </doc>
+    </method>
+    <method name="setApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationId</code> of the application.
+ 
+ @param applicationId <code>ApplicationId</code> of the application]]>
+      </doc>
+    </method>
+    <method name="getApplicationPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>Priority</code> of the application to be set.
+ 
+ @return <code>Priority</code> of the application to be set.]]>
+      </doc>
+    </method>
+    <method name="setApplicationPriority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <doc>
+      <![CDATA[Set the <code>Priority</code> of the application.
+ 
+ @param priority <code>Priority</code> of the application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request sent by the client to the <code>ResourceManager</code> to set or
+ update the application priority.
+ </p>
+ <p>
+ The request includes the {@link ApplicationId} of the application and
+ {@link Priority} to be set for an application
+ </p>
+ 
+ @see ApplicationClientProtocol#updateApplicationPriority(UpdateApplicationPriorityRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse -->
+  <class name="UpdateApplicationPriorityResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UpdateApplicationPriorityResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+    </method>
+    <method name="getApplicationPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>Priority</code> of the application to be set.
+ @return Updated <code>Priority</code> of the application.]]>
+      </doc>
+    </method>
+    <method name="setApplicationPriority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <doc>
+      <![CDATA[Set the <code>Priority</code> of the application.
+
+ @param priority <code>Priority</code> of the application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>ResourceManager</code> to the client on update
+ the application priority.
+ </p>
+ <p>
+ A response without exception means that the move has completed successfully.
+ </p>
+ 
+ @see ApplicationClientProtocol#updateApplicationPriority(UpdateApplicationPriorityRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest -->
+  <class name="UpdateApplicationTimeoutsRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UpdateApplicationTimeoutsRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="applicationTimeouts" type="java.util.Map"/>
+    </method>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of the application.
+ @return <code>ApplicationId</code> of the application]]>
+      </doc>
+    </method>
+    <method name="setApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationId</code> of the application.
+ @param applicationId <code>ApplicationId</code> of the application]]>
+      </doc>
+    </method>
+    <method name="getApplicationTimeouts" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <code>ApplicationTimeouts</code> of the application. Timeout value is
+ in ISO8601 standard with format <b>yyyy-MM-dd'T'HH:mm:ss.SSSZ</b>.
+ @return all <code>ApplicationTimeouts</code> of the application.]]>
+      </doc>
+    </method>
+    <method name="setApplicationTimeouts"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTimeouts" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationTimeouts</code> for the application. Timeout value
+ is absolute. Timeout value should meet ISO8601 format. Support ISO8601
+ format is <b>yyyy-MM-dd'T'HH:mm:ss.SSSZ</b>. All pre-existing Map entries
+ are cleared before adding the new Map.
+ @param applicationTimeouts <code>ApplicationTimeouts</code>s for the
+          application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request sent by the client to the <code>ResourceManager</code> to set or
+ update the application timeout.
+ </p>
+ <p>
+ The request includes the {@link ApplicationId} of the application and timeout
+ to be set for an application
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse -->
+  <class name="UpdateApplicationTimeoutsResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UpdateApplicationTimeoutsResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationTimeouts" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <code>ApplicationTimeouts</code> of the application. Timeout value is
+ in ISO8601 standard with format <b>yyyy-MM-dd'T'HH:mm:ss.SSSZ</b>.
+ @return all <code>ApplicationTimeouts</code> of the application.]]>
+      </doc>
+    </method>
+    <method name="setApplicationTimeouts"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTimeouts" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationTimeouts</code> for the application. Timeout value
+ is absolute. Timeout value should meet ISO8601 format. Support ISO8601
+ format is <b>yyyy-MM-dd'T'HH:mm:ss.SSSZ</b>. All pre-existing Map entries
+ are cleared before adding the new Map.
+ @param applicationTimeouts <code>ApplicationTimeouts</code>s for the
+          application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response sent by the <code>ResourceManager</code> to the client on update
+ application timeout.
+ </p>
+ <p>
+ A response without exception means that the update has completed
+ successfully.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceRequest -->
+  <class name="UseSharedCacheResourceRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UseSharedCacheResourceRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAppId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of the resource to be used.
+
+ @return <code>ApplicationId</code>]]>
+      </doc>
+    </method>
+    <method name="setAppId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="id" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationId</code> of the resource to be used.
+
+ @param id <code>ApplicationId</code>]]>
+      </doc>
+    </method>
+    <method name="getResourceKey" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>key</code> of the resource to be used.
+
+ @return <code>key</code>]]>
+      </doc>
+    </method>
+    <method name="setResourceKey"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <code>key</code> of the resource to be used.
+
+ @param key unique identifier for the resource]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The request from clients to the <code>SharedCacheManager</code> that claims a
+ resource in the shared cache.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceResponse -->
+  <class name="UseSharedCacheResourceResponse" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UseSharedCacheResourceResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getPath" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>Path</code> corresponding to the requested resource in the
+ shared cache.
+
+ @return String A <code>Path</code> if the resource exists in the shared
+         cache, <code>null</code> otherwise]]>
+      </doc>
+    </method>
+    <method name="setPath"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <code>Path</code> corresponding to a resource in the shared cache.
+
+ @param p A <code>Path</code> corresponding to a resource in the shared
+          cache]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The response from the SharedCacheManager to the client that indicates whether
+ a requested resource exists in the cache.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.protocolrecords.UseSharedCacheResourceResponse -->
+</package>
+<package name="org.apache.hadoop.yarn.api.records">
+  <!-- start class org.apache.hadoop.yarn.api.records.AMCommand -->
+  <class name="AMCommand" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.AMCommand[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.AMCommand"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Command sent by the Resource Manager to the Application Master in the 
+ AllocateResponse 
+ @see AllocateResponse]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.AMCommand -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ApplicationAccessType -->
+  <class name="ApplicationAccessType" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.ApplicationAccessType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.ApplicationAccessType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Application access types.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ApplicationAccessType -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ApplicationAttemptId -->
+  <class name="ApplicationAttemptId" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="ApplicationAttemptId"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="attemptId" type="int"/>
+    </method>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of the <code>ApplicationAttempId</code>. 
+ @return <code>ApplicationId</code> of the <code>ApplicationAttempId</code>]]>
+      </doc>
+    </method>
+    <method name="getAttemptId" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>attempt id</code> of the <code>Application</code>.
+ @return <code>attempt id</code> of the <code>Application</code>]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="build"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="fromString" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptIdStr" type="java.lang.String"/>
+    </method>
+    <field name="appAttemptIdStrPrefix" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<p><code>ApplicationAttemptId</code> denotes the particular <em>attempt</em>
+ of an <code>ApplicationMaster</code> for a given {@link ApplicationId}.</p>
+ 
+ <p>Multiple attempts might be needed to run an application to completion due
+ to temporal failures of the <code>ApplicationMaster</code> such as hardware
+ failures, connectivity issues etc. on the node on which it was scheduled.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ApplicationAttemptId -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ApplicationAttemptReport -->
+  <class name="ApplicationAttemptReport" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationAttemptReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="host" type="java.lang.String"/>
+      <param name="rpcPort" type="int"/>
+      <param name="url" type="java.lang.String"/>
+      <param name="oUrl" type="java.lang.String"/>
+      <param name="diagnostics" type="java.lang.String"/>
+      <param name="state" type="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"/>
+      <param name="amContainerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+    </method>
+    <method name="getYarnApplicationAttemptState" return="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>YarnApplicationAttemptState</em> of the application attempt.
+ 
+ @return <em>YarnApplicationAttemptState</em> of the application attempt]]>
+      </doc>
+    </method>
+    <method name="getRpcPort" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>RPC port</em> of this attempt <code>ApplicationMaster</code>.
+ 
+ @return <em>RPC port</em> of this attempt <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>host</em> on which this attempt of
+ <code>ApplicationMaster</code> is running.
+ 
+ @return <em>host</em> on which this attempt of
+         <code>ApplicationMaster</code> is running]]>
+      </doc>
+    </method>
+    <method name="getDiagnostics" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>diagnositic information</em> of the application attempt in case
+ of errors.
+ 
+ @return <em>diagnositic information</em> of the application attempt in case
+         of errors]]>
+      </doc>
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>tracking url</em> for the application attempt.
+ 
+ @return <em>tracking url</em> for the application attempt]]>
+      </doc>
+    </method>
+    <method name="getOriginalTrackingUrl" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>original tracking url</em> for the application attempt.
+ 
+ @return <em>original tracking url</em> for the application attempt]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationAttemptId</code> of this attempt of the
+ application
+ 
+ @return <code>ApplicationAttemptId</code> of the attempt]]>
+      </doc>
+    </method>
+    <method name="getAMContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerId</code> of AMContainer for this attempt
+ 
+ @return <code>ContainerId</code> of the attempt]]>
+      </doc>
+    </method>
+    <method name="getStartTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>finish time</em> of the application.
+ 
+ @return <em>finish time</em> of the application]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code ApplicationAttemptReport} is a report of an application attempt.
+ <p>
+ It includes details such as:
+ <ul>
+   <li>{@link ApplicationAttemptId} of the application.</li>
+   <li>Host on which the <code>ApplicationMaster</code> of this attempt is
+   running.</li>
+   <li>RPC port of the <code>ApplicationMaster</code> of this attempt.</li>
+   <li>Tracking URL.</li>
+   <li>Diagnostic information in case of errors.</li>
+   <li>{@link YarnApplicationAttemptState} of the application attempt.</li>
+   <li>{@link ContainerId} of the master Container.</li>
+ </ul>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ApplicationAttemptReport -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ApplicationId -->
+  <class name="ApplicationId" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="ApplicationId"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="clusterTimestamp" type="long"/>
+      <param name="id" type="int"/>
+    </method>
+    <method name="getId" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the short integer identifier of the <code>ApplicationId</code>
+ which is unique for all applications started by a particular instance
+ of the <code>ResourceManager</code>.
+ @return short integer identifier of the <code>ApplicationId</code>]]>
+      </doc>
+    </method>
+    <method name="getClusterTimestamp" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>start time</em> of the <code>ResourceManager</code> which is 
+ used to generate globally unique <code>ApplicationId</code>.
+ @return <em>start time</em> of the <code>ResourceManager</code>]]>
+      </doc>
+    </method>
+    <method name="build"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="fromString" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appIdStr" type="java.lang.String"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <field name="appIdStrPrefix" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<p><code>ApplicationId</code> represents the <em>globally unique</em> 
+ identifier for an application.</p>
+ 
+ <p>The globally unique nature of the identifier is achieved by using the 
+ <em>cluster timestamp</em> i.e. start-time of the 
+ <code>ResourceManager</code> along with a monotonically increasing counter
+ for the application.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ApplicationId -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ApplicationReport -->
+  <class name="ApplicationReport" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of the application.
+ @return <code>ApplicationId</code> of the application]]>
+      </doc>
+    </method>
+    <method name="getCurrentApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationAttemptId</code> of the current
+ attempt of the application
+ @return <code>ApplicationAttemptId</code> of the attempt]]>
+      </doc>
+    </method>
+    <method name="getUser" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>user</em> who submitted the application.
+ @return <em>user</em> who submitted the application]]>
+      </doc>
+    </method>
+    <method name="getQueue" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>queue</em> to which the application was submitted.
+ @return <em>queue</em> to which the application was submitted]]>
+      </doc>
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user-defined <em>name</em> of the application.
+ @return <em>name</em> of the application]]>
+      </doc>
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>host</em> on which the <code>ApplicationMaster</code>
+ is running.
+ @return <em>host</em> on which the <code>ApplicationMaster</code>
+         is running]]>
+      </doc>
+    </method>
+    <method name="getRpcPort" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>RPC port</em> of the <code>ApplicationMaster</code>.
+ @return <em>RPC port</em> of the <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="getClientToAMToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>client token</em> for communicating with the
+ <code>ApplicationMaster</code>.
+ <p>
+ <em>ClientToAMToken</em> is the security token used by the AMs to verify
+ authenticity of any <code>client</code>.
+ </p>
+
+ <p>
+ The <code>ResourceManager</code>, provides a secure token (via
+ {@link ApplicationReport#getClientToAMToken()}) which is verified by the
+ ApplicationMaster when the client directly talks to an AM.
+ </p>
+ @return <em>client token</em> for communicating with the
+ <code>ApplicationMaster</code>]]>
+      </doc>
+    </method>
+    <method name="getYarnApplicationState" return="org.apache.hadoop.yarn.api.records.YarnApplicationState"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>YarnApplicationState</code> of the application.
+ @return <code>YarnApplicationState</code> of the application]]>
+      </doc>
+    </method>
+    <method name="getDiagnostics" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>diagnostic information</em> of the application in case of
+ errors.
+ @return <em>diagnostic information</em> of the application in case
+         of errors]]>
+      </doc>
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>tracking url</em> for the application.
+ @return <em>tracking url</em> for the application]]>
+      </doc>
+    </method>
+    <method name="getStartTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>start time</em> of the application.
+ @return <em>start time</em> of the application]]>
+      </doc>
+    </method>
+    <method name="getSubmitTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLaunchTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>finish time</em> of the application.
+ @return <em>finish time</em> of the application]]>
+      </doc>
+    </method>
+    <method name="getFinalApplicationStatus" return="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>final finish status</em> of the application.
+ @return <em>final finish status</em> of the application]]>
+      </doc>
+    </method>
+    <method name="getApplicationResourceUsageReport" return="org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Retrieve the structure containing the job resources for this application
+ @return the job resources structure for this application]]>
+      </doc>
+    </method>
+    <method name="getProgress" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the application's progress ( range 0.0 to 1.0 )
+ @return application's progress]]>
+      </doc>
+    </method>
+    <method name="getApplicationType" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the application's Type 
+ @return application's Type]]>
+      </doc>
+    </method>
+    <method name="getApplicationTags" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get all tags corresponding to the application
+ @return Application's tags]]>
+      </doc>
+    </method>
+    <method name="getAMRMToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the AMRM token of the application.
+ <p>
+ The AMRM token is required for AM to RM scheduling operations. For 
+ managed Application Masters YARN takes care of injecting it. For unmanaged
+ Applications Masters, the token must be obtained via this method and set
+ in the {@link org.apache.hadoop.security.UserGroupInformation} of the
+ current user.
+ <p>
+ The AMRM token will be returned only if all the following conditions are
+ met:
+ <ul>
+   <li>the requester is the owner of the ApplicationMaster</li>
+   <li>the application master is an unmanaged ApplicationMaster</li>
+   <li>the application master is in ACCEPTED state</li>
+ </ul>
+ Else this method returns NULL.
+ 
+ @return the AM to RM token if available.]]>
+      </doc>
+    </method>
+    <method name="getLogAggregationStatus" return="org.apache.hadoop.yarn.api.records.LogAggregationStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get log aggregation status for the application
+ @return Application's log aggregation status]]>
+      </doc>
+    </method>
+    <method name="isUnmanagedApp" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return true if the AM is not managed by the RM]]>
+      </doc>
+    </method>
+    <method name="setUnmanagedApp"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="unmanagedApplication" type="boolean"/>
+      <doc>
+      <![CDATA[@param unmanagedApplication true if RM should not manage the AM]]>
+      </doc>
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get priority of the application
+
+ @return Application's priority]]>
+      </doc>
+    </method>
+    <method name="getAppNodeLabelExpression" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the default Node Label expression for all the application's containers
+
+ @return Application's NodeLabelExpression]]>
+      </doc>
+    </method>
+    <method name="setAppNodeLabelExpression"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appNodeLabelExpression" type="java.lang.String"/>
+    </method>
+    <method name="getAmNodeLabelExpression" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the default Node Label expression for all the application's containers
+
+ @return Application's NodeLabelExpression]]>
+      </doc>
+    </method>
+    <method name="setAmNodeLabelExpression"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="amNodeLabelExpression" type="java.lang.String"/>
+    </method>
+    <method name="getApplicationTimeouts" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[{@code ApplicationReport} is a report of an application.
+ <p>
+ It includes details such as:
+ <ul>
+   <li>{@link ApplicationId} of the application.</li>
+   <li>Applications user.</li>
+   <li>Application queue.</li>
+   <li>Application name.</li>
+   <li>Host on which the <code>ApplicationMaster</code> is running.</li>
+   <li>RPC port of the <code>ApplicationMaster</code>.</li>
+   <li>Tracking URL.</li>
+   <li>{@link YarnApplicationState} of the application.</li>
+   <li>Diagnostic information in case of errors.</li>
+   <li>Start time of the application.</li>
+   <li>Client {@link Token} of the application (if security is enabled).</li>
+ </ul>
+
+ @see ApplicationClientProtocol#getApplicationReport(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ApplicationReport -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport -->
+  <class name="ApplicationResourceUsageReport" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationResourceUsageReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNumUsedContainers" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of used containers.  -1 for invalid/inaccessible reports.
+ @return the number of used containers]]>
+      </doc>
+    </method>
+    <method name="getUsedResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the used <code>Resource</code>.  -1 for invalid/inaccessible reports.
+ @return the used <code>Resource</code>]]>
+      </doc>
+    </method>
+    <method name="getReservedResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reserved <code>Resource</code>.  -1 for invalid/inaccessible reports.
+ @return the reserved <code>Resource</code>]]>
+      </doc>
+    </method>
+    <method name="getNeededResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the needed <code>Resource</code>.  -1 for invalid/inaccessible reports.
+ @return the needed <code>Resource</code>]]>
+      </doc>
+    </method>
+    <method name="getMemorySeconds" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the aggregated amount of memory (in megabytes) the application has
+ allocated times the number of seconds the application has been running.
+ @return the aggregated amount of memory seconds]]>
+      </doc>
+    </method>
+    <method name="getVcoreSeconds" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the aggregated number of vcores that the application has allocated
+ times the number of seconds the application has been running.
+ @return the aggregated number of vcore seconds]]>
+      </doc>
+    </method>
+    <method name="getQueueUsagePercentage" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the percentage of resources of the queue that the app is using.
+ @return the percentage of resources of the queue that the app is using.]]>
+      </doc>
+    </method>
+    <method name="getClusterUsagePercentage" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the percentage of resources of the cluster that the app is using.
+ @return the percentage of resources of the cluster that the app is using.]]>
+      </doc>
+    </method>
+    <method name="getPreemptedMemorySeconds" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the aggregated amount of memory preempted(in megabytes)
+ the application has allocated times the number of
+ seconds the application has been running.
+ @return the aggregated amount of memory seconds]]>
+      </doc>
+    </method>
+    <method name="getPreemptedVcoreSeconds" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the aggregated number of vcores preempted that the application has
+ allocated times the number of seconds the application has been running.
+ @return the aggregated number of vcore seconds]]>
+      </doc>
+    </method>
+    <method name="getResourceSecondsMap" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the aggregated number of resources that the application has
+ allocated times the number of seconds the application has been running.
+ @return map containing the resource name and aggregated resource-seconds]]>
+      </doc>
+    </method>
+    <method name="getPreemptedResourceSecondsMap" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the aggregated number of resources preempted that the application has
+ allocated times the number of seconds the application has been running.
+ @return map containing the resource name and aggregated preempted
+ resource-seconds]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Contains various scheduling metrics to be reported by UI and CLI.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext -->
+  <class name="ApplicationSubmissionContext" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationSubmissionContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="applicationName" type="java.lang.String"/>
+      <param name="queue" type="java.lang.String"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="amContainer" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="isUnmanagedAM" type="boolean"/>
+      <param name="cancelTokensWhenComplete" type="boolean"/>
+      <param name="maxAppAttempts" type="int"/>
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="applicationType" type="java.lang.String"/>
+      <param name="keepContainers" type="boolean"/>
+      <param name="appLabelExpression" type="java.lang.String"/>
+      <param name="amContainerLabelExpression" type="java.lang.String"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="applicationName" type="java.lang.String"/>
+      <param name="queue" type="java.lang.String"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="amContainer" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="isUnmanagedAM" type="boolean"/>
+      <param name="cancelTokensWhenComplete" type="boolean"/>
+      <param name="maxAppAttempts" type="int"/>
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="applicationType" type="java.lang.String"/>
+      <param name="keepContainers" type="boolean"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="applicationName" type="java.lang.String"/>
+      <param name="queue" type="java.lang.String"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="amContainer" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="isUnmanagedAM" type="boolean"/>
+      <param name="cancelTokensWhenComplete" type="boolean"/>
+      <param name="maxAppAttempts" type="int"/>
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="applicationType" type="java.lang.String"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="applicationName" type="java.lang.String"/>
+      <param name="queue" type="java.lang.String"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="amContainer" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="isUnmanagedAM" type="boolean"/>
+      <param name="cancelTokensWhenComplete" type="boolean"/>
+      <param name="maxAppAttempts" type="int"/>
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="applicationName" type="java.lang.String"/>
+      <param name="queue" type="java.lang.String"/>
+      <param name="amContainer" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="isUnmanagedAM" type="boolean"/>
+      <param name="cancelTokensWhenComplete" type="boolean"/>
+      <param name="maxAppAttempts" type="int"/>
+      <param name="applicationType" type="java.lang.String"/>
+      <param name="keepContainers" type="boolean"/>
+      <param name="appLabelExpression" type="java.lang.String"/>
+      <param name="resourceRequest" type="org.apache.hadoop.yarn.api.records.ResourceRequest"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="applicationName" type="java.lang.String"/>
+      <param name="queue" type="java.lang.String"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="amContainer" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="isUnmanagedAM" type="boolean"/>
+      <param name="cancelTokensWhenComplete" type="boolean"/>
+      <param name="maxAppAttempts" type="int"/>
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="applicationType" type="java.lang.String"/>
+      <param name="keepContainers" type="boolean"/>
+      <param name="attemptFailuresValidityInterval" type="long"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="applicationName" type="java.lang.String"/>
+      <param name="queue" type="java.lang.String"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="amContainer" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="isUnmanagedAM" type="boolean"/>
+      <param name="cancelTokensWhenComplete" type="boolean"/>
+      <param name="maxAppAttempts" type="int"/>
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="applicationType" type="java.lang.String"/>
+      <param name="keepContainers" type="boolean"/>
+      <param name="logAggregationContext" type="org.apache.hadoop.yarn.api.records.LogAggregationContext"/>
+    </method>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of the submitted application.
+ @return <code>ApplicationId</code> of the submitted application]]>
+      </doc>
+    </method>
+    <method name="setApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationId</code> of the submitted application.
+ @param applicationId <code>ApplicationId</code> of the submitted
+                      application]]>
+      </doc>
+    </method>
+    <method name="getApplicationName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the application <em>name</em>.
+ @return application name]]>
+      </doc>
+    </method>
+    <method name="setApplicationName"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the application <em>name</em>.
+ @param applicationName application name]]>
+      </doc>
+    </method>
+    <method name="getQueue" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>queue</em> to which the application is being submitted.
+ @return <em>queue</em> to which the application is being submitted]]>
+      </doc>
+    </method>
+    <method name="setQueue"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queue" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <em>queue</em> to which the application is being submitted
+ @param queue <em>queue</em> to which the application is being submitted]]>
+      </doc>
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>Priority</code> of the application.
+ @return <code>Priority</code> of the application]]>
+      </doc>
+    </method>
+    <method name="getAMContainerSpec" return="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerLaunchContext</code> to describe the 
+ <code>Container</code> with which the <code>ApplicationMaster</code> is
+ launched.
+ @return <code>ContainerLaunchContext</code> for the 
+         <code>ApplicationMaster</code> container]]>
+      </doc>
+    </method>
+    <method name="setAMContainerSpec"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="amContainer" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <doc>
+      <![CDATA[Set the <code>ContainerLaunchContext</code> to describe the 
+ <code>Container</code> with which the <code>ApplicationMaster</code> is
+ launched.
+ @param amContainer <code>ContainerLaunchContext</code> for the 
+                    <code>ApplicationMaster</code> container]]>
+      </doc>
+    </method>
+    <method name="getUnmanagedAM" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get if the RM should manage the execution of the AM. 
+ If true, then the RM 
+ will not allocate a container for the AM and start it. It will expect the 
+ AM to be launched and connect to the RM within the AM liveliness period and 
+ fail the app otherwise. The client should launch the AM only after the RM 
+ has ACCEPTED the application and changed the <code>YarnApplicationState</code>.
+ Such apps will not be retried by the RM on app attempt failure.
+ The default value is false.
+ @return true if the AM is not managed by the RM]]>
+      </doc>
+    </method>
+    <method name="setUnmanagedAM"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="value" type="boolean"/>
+      <doc>
+      <![CDATA[@param value true if RM should not manage the AM]]>
+      </doc>
+    </method>
+    <method name="getMaxAppAttempts" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return the number of max attempts of the application to be submitted]]>
+      </doc>
+    </method>
+    <method name="setMaxAppAttempts"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="maxAppAttempts" type="int"/>
+      <doc>
+      <![CDATA[Set the number of max attempts of the application to be submitted. WARNING:
+ it should be no larger than the global number of max attempts in the Yarn
+ configuration.
+ @param maxAppAttempts the number of max attempts of the application
+ to be submitted.]]>
+      </doc>
+    </method>
+    <method name="getResource" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the resource required by the <code>ApplicationMaster</code> for this
+ application. Please note this will be DEPRECATED, use <em>getResource</em>
+ in <em>getAMContainerResourceRequest</em> instead.
+ 
+ @return the resource required by the <code>ApplicationMaster</code> for
+         this application.]]>
+      </doc>
+    </method>
+    <method name="setResource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Set the resource required by the <code>ApplicationMaster</code> for this
+ application.
+
+ @param resource the resource required by the <code>ApplicationMaster</code>
+ for this application.]]>
+      </doc>
+    </method>
+    <method name="getApplicationType" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the application type
+ 
+ @return the application type]]>
+      </doc>
+    </method>
+    <method name="setApplicationType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationType" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the application type
+ 
+ @param applicationType the application type]]>
+      </doc>
+    </method>
+    <method name="getKeepContainersAcrossApplicationAttempts" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the flag which indicates whether to keep containers across application
+ attempts or not.
+ 
+ @return the flag which indicates whether to keep containers across
+         application attempts or not.]]>
+      </doc>
+    </method>
+    <method name="setKeepContainersAcrossApplicationAttempts"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="keepContainers" type="boolean"/>
+      <doc>
+      <![CDATA[Set the flag which indicates whether to keep containers across application
+ attempts.
+ <p>
+ For managed AM, if the flag is true, running containers will not be killed
+ when application attempt fails and these containers will be retrieved by
+ the new application attempt on registration via
+ {@link ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)}.
+ </p>
+ <p>
+ For unmanaged AM, if the flag is true, RM allows re-register and returns
+ the running containers in the same attempt back to the UAM for HA.
+ </p>
+
+ @param keepContainers the flag which indicates whether to keep containers
+          across application attempts.]]>
+      </doc>
+    </method>
+    <method name="getApplicationTags" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get tags for the application
+
+ @return the application tags]]>
+      </doc>
+    </method>
+    <method name="setApplicationTags"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tags" type="java.util.Set"/>
+      <doc>
+      <![CDATA[Set tags for the application. A maximum of
+ {@link YarnConfiguration#RM_APPLICATION_MAX_TAGS} are allowed
+ per application. Each tag can be at most
+ {@link YarnConfiguration#RM_APPLICATION_MAX_TAG_LENGTH}
+ characters, and can contain only ASCII characters.
+
+ @param tags tags to set]]>
+      </doc>
+    </method>
+    <method name="getNodeLabelExpression" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get node-label-expression for this app. If this is set, all containers of
+ this application without setting node-label-expression in ResurceRequest
+ will get allocated resources on only those nodes that satisfy this
+ node-label-expression.
+ 
+ If different node-label-expression of this app and ResourceRequest are set
+ at the same time, the one set in ResourceRequest will be used when
+ allocating container
+ 
+ @return node-label-expression for this app]]>
+      </doc>
+    </method>
+    <method name="setNodeLabelExpression"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeLabelExpression" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set node-label-expression for this app
+ @param nodeLabelExpression node-label-expression of this app]]>
+      </doc>
+    </method>
+    <method name="getAMContainerResourceRequest" return="org.apache.hadoop.yarn.api.records.ResourceRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="See {@link #getAMContainerResourceRequests()}">
+      <doc>
+      <![CDATA[Get the ResourceRequest of the AM container.
+
+ If this is not null, scheduler will use this to acquire resource for AM
+ container.
+
+ If this is null, scheduler will assemble a ResourceRequest by using
+ <em>getResource</em> and <em>getPriority</em> of
+ <em>ApplicationSubmissionContext</em>.
+
+ Number of containers and Priority will be ignored.
+
+ @return ResourceRequest of the AM container
+ @deprecated See {@link #getAMContainerResourceRequests()}]]>
+      </doc>
+    </method>
+    <method name="setAMContainerResourceRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="See {@link #setAMContainerResourceRequests(List)}">
+      <param name="request" type="org.apache.hadoop.yarn.api.records.ResourceRequest"/>
+      <doc>
+      <![CDATA[Set ResourceRequest of the AM container
+ @param request of the AM container
+ @deprecated See {@link #setAMContainerResourceRequests(List)}]]>
+      </doc>
+    </method>
+    <method name="getAMContainerResourceRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the ResourceRequests of the AM container.
+
+ If this is not null, scheduler will use this to acquire resource for AM
+ container.
+
+ If this is null, scheduler will use the ResourceRequest as determined by
+ <em>getAMContainerResourceRequest</em> and its behavior.
+
+ Number of containers and Priority will be ignored.
+
+ @return List of ResourceRequests of the AM container]]>
+      </doc>
+    </method>
+    <method name="setAMContainerResourceRequests"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="requests" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set ResourceRequests of the AM container.
+ @param requests of the AM container]]>
+      </doc>
+    </method>
+    <method name="getAttemptFailuresValidityInterval" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the attemptFailuresValidityInterval in milliseconds for the application
+
+ @return the attemptFailuresValidityInterval]]>
+      </doc>
+    </method>
+    <method name="setAttemptFailuresValidityInterval"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attemptFailuresValidityInterval" type="long"/>
+      <doc>
+      <![CDATA[Set the attemptFailuresValidityInterval in milliseconds for the application
+ @param attemptFailuresValidityInterval]]>
+      </doc>
+    </method>
+    <method name="getLogAggregationContext" return="org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <code>LogAggregationContext</code> of the application
+
+ @return <code>LogAggregationContext</code> of the application]]>
+      </doc>
+    </method>
+    <method name="setLogAggregationContext"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="logAggregationContext" type="org.apache.hadoop.yarn.api.records.LogAggregationContext"/>
+      <doc>
+      <![CDATA[Set <code>LogAggregationContext</code> for the application
+
+ @param logAggregationContext
+          for the application]]>
+      </doc>
+    </method>
+    <method name="getReservationID" return="org.apache.hadoop.yarn.api.records.ReservationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reservation id, that corresponds to a valid resource allocation in
+ the scheduler (between start and end time of the corresponding reservation)
+ 
+ @return the reservation id representing the unique id of the corresponding
+         reserved resource allocation in the scheduler]]>
+      </doc>
+    </method>
+    <method name="setReservationID"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationID" type="org.apache.hadoop.yarn.api.records.ReservationId"/>
+      <doc>
+      <![CDATA[Set the reservation id, that correspond to a valid resource allocation in
+ the scheduler (between start and end time of the corresponding reservation)
+ 
+ @param reservationID representing the unique id of the
+          corresponding reserved resource allocation in the scheduler]]>
+      </doc>
+    </method>
+    <method name="getApplicationTimeouts" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <code>ApplicationTimeouts</code> of the application. Timeout value is
+ in seconds.
+ @return all <code>ApplicationTimeouts</code> of the application.]]>
+      </doc>
+    </method>
+    <method name="setApplicationTimeouts"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTimeouts" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationTimeouts</code> for the application in seconds.
+ All pre-existing Map entries are cleared before adding the new Map.
+ <p>
+ <b>Note:</b> If application timeout value is less than or equal to zero
+ then application submission will throw an exception.
+ </p>
+ @param applicationTimeouts <code>ApplicationTimeouts</code>s for the
+          application]]>
+      </doc>
+    </method>
+    <method name="getApplicationSchedulingPropertiesMap" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get application scheduling environment variables stored as a key value
+ pair map for application.
+
+ @return placement envs for application.]]>
+      </doc>
+    </method>
+    <method name="setApplicationSchedulingPropertiesMap"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="schedulingEnvMap" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set the scheduling envs for the application.
+
+ @param schedulingEnvMap
+          A map of env's for the application scheduling preferences.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code ApplicationSubmissionContext} represents all of the
+ information needed by the {@code ResourceManager} to launch
+ the {@code ApplicationMaster} for an application.
+ <p>
+ It includes details such as:
+ <ul>
+   <li>{@link ApplicationId} of the application.</li>
+   <li>Application user.</li>
+   <li>Application name.</li>
+   <li>{@link Priority} of the application.</li>
+   <li>
+     {@link ContainerLaunchContext} of the container in which the
+     <code>ApplicationMaster</code> is executed.
+   </li>
+   <li>
+     maxAppAttempts. The maximum number of application attempts.
+     It should be no larger than the global number of max attempts in the
+     YARN configuration.
+   </li>
+   <li>
+     attemptFailuresValidityInterval. The default value is -1.
+     when attemptFailuresValidityInterval in milliseconds is set to
+     {@literal >} 0, the failure number will no take failures which happen
+     out of the validityInterval into failure count. If failure count
+     reaches to maxAppAttempts, the application will be failed.
+   </li>
+   <li>Optional, application-specific {@link LogAggregationContext}</li>
+ </ul>
+ 
+ @see ContainerLaunchContext
+ @see ApplicationClientProtocol#submitApplication(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ApplicationTimeout -->
+  <class name="ApplicationTimeout" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationTimeout"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ApplicationTimeout"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="org.apache.hadoop.yarn.api.records.ApplicationTimeoutType"/>
+      <param name="expiryTime" type="java.lang.String"/>
+      <param name="remainingTime" type="long"/>
+    </method>
+    <method name="getTimeoutType" return="org.apache.hadoop.yarn.api.records.ApplicationTimeoutType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the application timeout type.
+ @return timeoutType of an application timeout.]]>
+      </doc>
+    </method>
+    <method name="setTimeoutType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timeoutType" type="org.apache.hadoop.yarn.api.records.ApplicationTimeoutType"/>
+      <doc>
+      <![CDATA[Set the application timeout type.
+ @param timeoutType of an application timeout.]]>
+      </doc>
+    </method>
+    <method name="getExpiryTime" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <code>expiryTime</code> for given timeout type.
+ @return expiryTime in ISO8601 standard with format
+         <b>yyyy-MM-dd'T'HH:mm:ss.SSSZ</b>.]]>
+      </doc>
+    </method>
+    <method name="setExpiryTime"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="expiryTime" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set <code>expiryTime</code> for given timeout type.
+ @param expiryTime in ISO8601 standard with format
+          <b>yyyy-MM-dd'T'HH:mm:ss.SSSZ</b>.]]>
+      </doc>
+    </method>
+    <method name="getRemainingTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <code>Remaining Time</code> of an application for given timeout type.
+ @return Remaining Time in seconds.]]>
+      </doc>
+    </method>
+    <method name="setRemainingTime"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="remainingTime" type="long"/>
+      <doc>
+      <![CDATA[Set <code>Remaining Time</code> of an application for given timeout type.
+ @param remainingTime in seconds.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code ApplicationTimeout} is a report for configured application timeouts.
+ It includes details such as:
+ <ul>
+ <li>{@link ApplicationTimeoutType} of the timeout type.</li>
+ <li>Expiry time in ISO8601 standard with format
+ <b>yyyy-MM-dd'T'HH:mm:ss.SSSZ</b> or "UNLIMITED".</li>
+ <li>Remaining time in seconds.</li>
+ </ul>
+ The possible values for {ExpiryTime, RemainingTimeInSeconds} are
+ <ul>
+ <li>{UNLIMITED,-1} : Timeout is not configured for given timeout type
+ (LIFETIME).</li>
+ <li>{ISO8601 date string, 0} : Timeout is configured and application has
+ completed.</li>
+ <li>{ISO8601 date string, greater than zero} : Timeout is configured and
+ application is RUNNING. Application will be timed out after configured
+ value.</li>
+ </ul>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ApplicationTimeout -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ApplicationTimeoutType -->
+  <class name="ApplicationTimeoutType" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.ApplicationTimeoutType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.ApplicationTimeoutType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Application timeout type.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ApplicationTimeoutType -->
+  <!-- start class org.apache.hadoop.yarn.api.records.CollectorInfo -->
+  <class name="CollectorInfo" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="CollectorInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.CollectorInfo"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="collectorAddr" type="java.lang.String"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.CollectorInfo"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="collectorAddr" type="java.lang.String"/>
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+    </method>
+    <method name="getCollectorAddr" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setCollectorAddr"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="addr" type="java.lang.String"/>
+    </method>
+    <method name="getCollectorToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get delegation token for app collector which AM will use to publish
+ entities.
+ @return the delegation token for app collector.]]>
+      </doc>
+    </method>
+    <method name="setCollectorToken"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+    </method>
+    <field name="DEFAULT_TIMESTAMP_VALUE" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Collector info containing collector address and collector token passed from
+ RM to AM in Allocate Response.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.CollectorInfo -->
+  <!-- start class org.apache.hadoop.yarn.api.records.Container -->
+  <class name="Container" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="Container"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the globally unique identifier for the container.
+ @return globally unique identifier for the container]]>
+      </doc>
+    </method>
+    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the identifier of the node on which the container is allocated.
+ @return identifier of the node on which the container is allocated]]>
+      </doc>
+    </method>
+    <method name="getNodeHttpAddress" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the http uri of the node on which the container is allocated.
+ @return http uri of the node on which the container is allocated]]>
+      </doc>
+    </method>
+    <method name="getExposedPorts" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the exposed ports of the node on which the container is allocated.
+ @return exposed ports of the node on which the container is allocated]]>
+      </doc>
+    </method>
+    <method name="getResource" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>Resource</code> allocated to the container.
+ @return <code>Resource</code> allocated to the container]]>
+      </doc>
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>Priority</code> at which the <code>Container</code> was
+ allocated.
+ @return <code>Priority</code> at which the <code>Container</code> was
+         allocated]]>
+      </doc>
+    </method>
+    <method name="getContainerToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerToken</code> for the container.
+ <p><code>ContainerToken</code> is the security token used by the framework
+ to verify authenticity of any <code>Container</code>.</p>
+
+ <p>The <code>ResourceManager</code>, on container allocation provides a
+ secure token which is verified by the <code>NodeManager</code> on
+ container launch.</p>
+
+ <p>Applications do not need to care about <code>ContainerToken</code>, they
+ are transparently handled by the framework - the allocated
+ <code>Container</code> includes the <code>ContainerToken</code>.</p>
+
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)
+
+ @return <code>ContainerToken</code> for the container]]>
+      </doc>
+    </method>
+    <method name="getAllocationRequestId" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the optional <em>ID</em> corresponding to the original {@code
+ ResourceRequest{@link #getAllocationRequestId()}}s which is satisfied by
+ this allocated {@code Container}.
+ <p>
+ The scheduler may return multiple {@code AllocateResponse}s corresponding
+ to the same ID as and when scheduler allocates {@code Container}s.
+ <b>Applications</b> can continue to completely ignore the returned ID in
+ the response and use the allocation for any of their outstanding requests.
+ <p>
+
+ @return the <em>ID</em> corresponding to the original  allocation request
+ which is satisfied by this allocation.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code Container} represents an allocated resource in the cluster.
+ <p>
+ The {@code ResourceManager} is the sole authority to allocate any
+ {@code Container} to applications. The allocated {@code Container}
+ is always on a single node and has a unique {@link ContainerId}. It has
+ a specific amount of {@link Resource} allocated.
+ <p>
+ It includes details such as:
+ <ul>
+   <li>{@link ContainerId} for the container, which is globally unique.</li>
+   <li>
+     {@link NodeId} of the node on which it is allocated.
+   </li>
+   <li>HTTP uri of the node.</li>
+   <li>{@link Resource} allocated to the container.</li>
+   <li>{@link Priority} at which the container was allocated.</li>
+   <li>
+     Container {@link Token} of the container, used to securely verify
+     authenticity of the allocation.
+   </li>
+ </ul>
+ 
+ Typically, an {@code ApplicationMaster} receives the {@code Container}
+ from the {@code ResourceManager} during resource-negotiation and then
+ talks to the {@code NodeManager} to start/stop containers.
+ 
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)
+ @see ContainerManagementProtocol#stopContainers(org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.Container -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ContainerExitStatus -->
+  <class name="ContainerExitStatus" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerExitStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <field name="SUCCESS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="INVALID" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ABORTED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Containers killed by the framework, either due to being released by
+ the application or being 'lost' due to node failures etc.]]>
+      </doc>
+    </field>
+    <field name="DISKS_FAILED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[When threshold number of the nodemanager-local-directories or
+ threshold number of the nodemanager-log-directories become bad.]]>
+      </doc>
+    </field>
+    <field name="PREEMPTED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Containers preempted by the framework.]]>
+      </doc>
+    </field>
+    <field name="KILLED_EXCEEDED_VMEM" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Container terminated because of exceeding allocated virtual memory.]]>
+      </doc>
+    </field>
+    <field name="KILLED_EXCEEDED_PMEM" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Container terminated because of exceeding allocated physical memory.]]>
+      </doc>
+    </field>
+    <field name="KILLED_BY_APPMASTER" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Container was terminated by stop request by the app master.]]>
+      </doc>
+    </field>
+    <field name="KILLED_BY_RESOURCEMANAGER" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Container was terminated by the resource manager.]]>
+      </doc>
+    </field>
+    <field name="KILLED_AFTER_APP_COMPLETION" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Container was terminated after the application finished.]]>
+      </doc>
+    </field>
+    <field name="KILLED_BY_CONTAINER_SCHEDULER" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Container was terminated by the ContainerScheduler to make room
+ for another container...]]>
+      </doc>
+    </field>
+    <field name="KILLED_FOR_EXCESS_LOGS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Container was terminated for generating excess log data.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[Container exit statuses indicating special exit circumstances.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ContainerExitStatus -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ContainerId -->
+  <class name="ContainerId" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="ContainerId"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="containerId" type="long"/>
+    </method>
+    <method name="getApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationAttemptId</code> of the application to which the
+ <code>Container</code> was assigned.
+ <p>
+ Note: If containers are kept alive across application attempts via
+ {@link ApplicationSubmissionContext#setKeepContainersAcrossApplicationAttempts(boolean)}
+ the <code>ContainerId</code> does not necessarily contain the current
+ running application attempt's <code>ApplicationAttemptId</code> This
+ container can be allocated by previously exited application attempt and
+ managed by the current running attempt thus have the previous application
+ attempt's <code>ApplicationAttemptId</code>.
+ </p>
+ 
+ @return <code>ApplicationAttemptId</code> of the application to which the
+         <code>Container</code> was assigned]]>
+      </doc>
+    </method>
+    <method name="getId" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the lower 32 bits of identifier of the <code>ContainerId</code>,
+ which doesn't include epoch. Note that this method will be marked as
+ deprecated, so please use <code>getContainerId</code> instead.
+ @return lower 32 bits of identifier of the <code>ContainerId</code>]]>
+      </doc>
+    </method>
+    <method name="getContainerId" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the identifier of the <code>ContainerId</code>. Upper 24 bits are
+ reserved as epoch of cluster, and lower 40 bits are reserved as
+ sequential number of containers.
+ @return identifier of the <code>ContainerId</code>]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return A string representation of containerId. The format is
+ container_e*epoch*_*clusterTimestamp*_*appId*_*attemptId*_*containerId*
+ when epoch is larger than 0
+ (e.g. container_e17_1410901177871_0001_01_000005).
+ *epoch* is increased when RM restarts or fails over.
+ When epoch is 0, epoch is omitted
+ (e.g. container_1410901177871_0001_01_000005).]]>
+      </doc>
+    </method>
+    <method name="fromString" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerIdStr" type="java.lang.String"/>
+    </method>
+    <method name="build"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <field name="CONTAINER_ID_BITMASK" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<p><code>ContainerId</code> represents a globally unique identifier
+ for a {@link Container} in the cluster.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ContainerId -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ContainerLaunchContext -->
+  <class name="ContainerLaunchContext" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerLaunchContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="localResources" type="java.util.Map"/>
+      <param name="environment" type="java.util.Map"/>
+      <param name="commands" type="java.util.List"/>
+      <param name="serviceData" type="java.util.Map"/>
+      <param name="tokens" type="java.nio.ByteBuffer"/>
+      <param name="acls" type="java.util.Map"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="localResources" type="java.util.Map"/>
+      <param name="environment" type="java.util.Map"/>
+      <param name="commands" type="java.util.List"/>
+      <param name="serviceData" type="java.util.Map"/>
+      <param name="tokens" type="java.nio.ByteBuffer"/>
+      <param name="acls" type="java.util.Map"/>
+      <param name="containerRetryContext" type="org.apache.hadoop.yarn.api.records.ContainerRetryContext"/>
+    </method>
+    <method name="getTokens" return="java.nio.ByteBuffer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get all the tokens needed by this container. It may include file-system
+ tokens, ApplicationMaster related tokens if this container is an
+ ApplicationMaster or framework level tokens needed by this container to
+ communicate to various services in a secure manner.
+ 
+ @return tokens needed by this container.]]>
+      </doc>
+    </method>
+    <method name="setTokens"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tokens" type="java.nio.ByteBuffer"/>
+      <doc>
+      <![CDATA[Set security tokens needed by this container.
+ @param tokens security tokens]]>
+      </doc>
+    </method>
+    <method name="getTokensConf" return="java.nio.ByteBuffer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the configuration used by RM to renew tokens.
+ @return The configuration used by RM to renew the tokens.]]>
+      </doc>
+    </method>
+    <method name="setTokensConf"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="tokensConf" type="java.nio.ByteBuffer"/>
+      <doc>
+      <![CDATA[Set the configuration used by RM to renew the tokens.
+ @param tokensConf The configuration used by RM to renew the tokens]]>
+      </doc>
+    </method>
+    <method name="getLocalResources" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <code>LocalResource</code> required by the container.
+ @return all <code>LocalResource</code> required by the container]]>
+      </doc>
+    </method>
+    <method name="setLocalResources"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="localResources" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set <code>LocalResource</code> required by the container. All pre-existing
+ Map entries are cleared before adding the new Map
+ @param localResources <code>LocalResource</code> required by the container]]>
+      </doc>
+    </method>
+    <method name="getServiceData" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[<p>
+ Get application-specific binary <em>service data</em>. This is a map keyed
+ by the name of each {@link AuxiliaryService} that is configured on a
+ NodeManager and value correspond to the application specific data targeted
+ for the keyed {@link AuxiliaryService}.
+ </p>
+ 
+ <p>
+ This will be used to initialize this application on the specific
+ {@link AuxiliaryService} running on the NodeManager by calling
+ {@link AuxiliaryService#initializeApplication(ApplicationInitializationContext)}
+ </p>
+ 
+ @return application-specific binary <em>service data</em>]]>
+      </doc>
+    </method>
+    <method name="setServiceData"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="serviceData" type="java.util.Map"/>
+      <doc>
+      <![CDATA[<p>
+ Set application-specific binary <em>service data</em>. This is a map keyed
+ by the name of each {@link AuxiliaryService} that is configured on a
+ NodeManager and value correspond to the application specific data targeted
+ for the keyed {@link AuxiliaryService}. All pre-existing Map entries are
+ preserved.
+ </p>
+ 
+ @param serviceData
+          application-specific binary <em>service data</em>]]>
+      </doc>
+    </method>
+    <method name="getEnvironment" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>environment variables</em> for the container.
+ @return <em>environment variables</em> for the container]]>
+      </doc>
+    </method>
+    <method name="setEnvironment"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="environment" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Add <em>environment variables</em> for the container. All pre-existing Map
+ entries are cleared before adding the new Map
+ @param environment <em>environment variables</em> for the container]]>
+      </doc>
+    </method>
+    <method name="getCommands" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <em>commands</em> for launching the container.
+ @return the list of <em>commands</em> for launching the container]]>
+      </doc>
+    </method>
+    <method name="setCommands"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="commands" type="java.util.List"/>
+      <doc>
+      <![CDATA[Add the list of <em>commands</em> for launching the container. All
+ pre-existing List entries are cleared before adding the new List
+ @param commands the list of <em>commands</em> for launching the container]]>
+      </doc>
+    </method>
+    <method name="getApplicationACLs" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationACL</code>s for the application. 
+ @return all the <code>ApplicationACL</code>s]]>
+      </doc>
+    </method>
+    <method name="setApplicationACLs"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="acls" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set the <code>ApplicationACL</code>s for the application. All pre-existing
+ Map entries are cleared before adding the new Map
+ @param acls <code>ApplicationACL</code>s for the application]]>
+      </doc>
+    </method>
+    <method name="getContainerRetryContext" return="org.apache.hadoop.yarn.api.records.ContainerRetryContext"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerRetryContext</code> to relaunch container.
+ @return <code>ContainerRetryContext</code> to relaunch container.]]>
+      </doc>
+    </method>
+    <method name="setContainerRetryContext"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerRetryContext" type="org.apache.hadoop.yarn.api.records.ContainerRetryContext"/>
+      <doc>
+      <![CDATA[Set the <code>ContainerRetryContext</code> to relaunch container.
+ @param containerRetryContext <code>ContainerRetryContext</code> to
+                              relaunch container.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code ContainerLaunchContext} represents all of the information
+ needed by the {@code NodeManager} to launch a container.
+ <p>
+ It includes details such as:
+ <ul>
+   <li>{@link ContainerId} of the container.</li>
+   <li>{@link Resource} allocated to the container.</li>
+   <li>User to whom the container is allocated.</li>
+   <li>Security tokens (if security is enabled).</li>
+   <li>
+     {@link LocalResource} necessary for running the container such
+     as binaries, jar, shared-objects, side-files etc.
+   </li>
+   <li>Optional, application-specific binary service data.</li>
+   <li>Environment variables for the launched process.</li>
+   <li>Command to launch the container.</li>
+   <li>Retry strategy when container exits with failure.</li>
+ </ul>
+ 
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ContainerLaunchContext -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ContainerReport -->
+  <class name="ContainerReport" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerId</code> of the container.
+ 
+ @return <code>ContainerId</code> of the container.]]>
+      </doc>
+    </method>
+    <method name="setContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+    </method>
+    <method name="getAllocatedResource" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the allocated <code>Resource</code> of the container.
+ 
+ @return allocated <code>Resource</code> of the container.]]>
+      </doc>
+    </method>
+    <method name="setAllocatedResource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="getAssignedNode" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the allocated <code>NodeId</code> where container is running.
+ 
+ @return allocated <code>NodeId</code> where container is running.]]>
+      </doc>
+    </method>
+    <method name="setAssignedNode"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the allocated <code>Priority</code> of the container.
+ 
+ @return allocated <code>Priority</code> of the container.]]>
+      </doc>
+    </method>
+    <method name="setPriority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+    </method>
+    <method name="getCreationTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the creation time of the container.
+ 
+ @return creation time of the container]]>
+      </doc>
+    </method>
+    <method name="setCreationTime"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="creationTime" type="long"/>
+    </method>
+    <method name="getFinishTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the Finish time of the container.
+ 
+ @return Finish time of the container]]>
+      </doc>
+    </method>
+    <method name="setFinishTime"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="finishTime" type="long"/>
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the DiagnosticsInfo of the container.
+ 
+ @return DiagnosticsInfo of the container]]>
+      </doc>
+    </method>
+    <method name="setDiagnosticsInfo"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="diagnosticsInfo" type="java.lang.String"/>
+    </method>
+    <method name="getLogUrl" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the LogURL of the container.
+ 
+ @return LogURL of the container]]>
+      </doc>
+    </method>
+    <method name="setLogUrl"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="logUrl" type="java.lang.String"/>
+    </method>
+    <method name="getContainerState" return="org.apache.hadoop.yarn.api.records.ContainerState"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the final <code>ContainerState</code> of the container.
+ 
+ @return final <code>ContainerState</code> of the container.]]>
+      </doc>
+    </method>
+    <method name="setContainerState"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerState" type="org.apache.hadoop.yarn.api.records.ContainerState"/>
+    </method>
+    <method name="getContainerExitStatus" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the final <code>exit status</code> of the container.
+ 
+ @return final <code>exit status</code> of the container.]]>
+      </doc>
+    </method>
+    <method name="setContainerExitStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerExitStatus" type="int"/>
+    </method>
+    <method name="getExposedPorts" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get exposed ports of the container.
+ 
+ @return the node exposed ports of the container]]>
+      </doc>
+    </method>
+    <method name="getNodeHttpAddress" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the Node Http address of the container.
+
+ @return the node http address of the container]]>
+      </doc>
+    </method>
+    <method name="getExecutionType" return="org.apache.hadoop.yarn.api.records.ExecutionType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the execution type of the container.
+
+ @return the execution type of the container]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code ContainerReport} is a report of an container.
+ <p>
+ It includes details such as:
+ <ul>
+   <li>{@link ContainerId} of the container.</li>
+   <li>Allocated Resources to the container.</li>
+   <li>Assigned Node id.</li>
+   <li>Assigned Priority.</li>
+   <li>Creation Time.</li>
+   <li>Finish Time.</li>
+   <li>Container Exit Status.</li>
+   <li>{@link ContainerState} of the container.</li>
+   <li>Diagnostic information in case of errors.</li>
+   <li>Log URL.</li>
+   <li>nodeHttpAddress</li>
+ </ul>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ContainerReport -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ContainerRetryContext -->
+  <class name="ContainerRetryContext" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerRetryContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRetryPolicy" return="org.apache.hadoop.yarn.api.records.ContainerRetryPolicy"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setRetryPolicy"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="retryPolicy" type="org.apache.hadoop.yarn.api.records.ContainerRetryPolicy"/>
+    </method>
+    <method name="getErrorCodes" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setErrorCodes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="errorCodes" type="java.util.Set"/>
+    </method>
+    <method name="getMaxRetries" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setMaxRetries"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="maxRetries" type="int"/>
+    </method>
+    <method name="getRetryInterval" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setRetryInterval"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="retryInterval" type="int"/>
+    </method>
+    <method name="getFailuresValidityInterval" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setFailuresValidityInterval"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="failuresValidityInterval" type="long"/>
+    </method>
+    <field name="RETRY_FOREVER" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RETRY_INVALID" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NEVER_RETRY_CONTEXT" type="org.apache.hadoop.yarn.api.records.ContainerRetryContext"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[{@code ContainerRetryContext} indicates how a container should retry
+ after it fails to run.
+ <p>
+ It provides details such as:
+ <ul>
+   <li>
+     {@link ContainerRetryPolicy} :
+     - NEVER_RETRY(DEFAULT value): no matter what error code is when container
+       fails to run, just do not retry.
+     - RETRY_ON_ALL_ERRORS: no matter what error code is, when container fails
+       to run, just retry.
+     - RETRY_ON_SPECIFIC_ERROR_CODES: when container fails to run, do retry if
+       the error code is one of <em>errorCodes</em>, otherwise do not retry.
+
+     Note: if error code is 137(SIGKILL) or 143(SIGTERM), it will not retry
+     because it is usually killed on purpose.
+   </li>
+   <li>
+     <em>maxRetries</em> specifies how many times to retry if need to retry.
+     If the value is -1, it means retry forever.
+   </li>
+   <li><em>retryInterval</em> specifies delaying some time before relaunch
+   container, the unit is millisecond.</li>
+   <li>
+     <em>failuresValidityInterval</em>: default value is -1.
+     When failuresValidityInterval in milliseconds is set to {@literal >} 0,
+     the failure number will not take failures which happen out of the
+     failuresValidityInterval into failure count. If failure count
+     reaches to <em>maxRetries</em>, the container will be failed.
+   </li>
+ </ul>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ContainerRetryContext -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ContainerRetryPolicy -->
+  <class name="ContainerRetryPolicy" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.ContainerRetryPolicy[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.ContainerRetryPolicy"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[<p>Retry policy for relaunching a <code>Container</code>.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ContainerRetryPolicy -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ContainerState -->
+  <class name="ContainerState" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.ContainerState[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.ContainerState"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[<p>State of a <code>Container</code>.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ContainerState -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ContainerStatus -->
+  <class name="ContainerStatus" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerId</code> of the container.
+ @return <code>ContainerId</code> of the container]]>
+      </doc>
+    </method>
+    <method name="getExecutionType" return="org.apache.hadoop.yarn.api.records.ExecutionType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ExecutionType</code> of the container.
+ @return <code>ExecutionType</code> of the container]]>
+      </doc>
+    </method>
+    <method name="getState" return="org.apache.hadoop.yarn.api.records.ContainerState"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerState</code> of the container.
+ @return <code>ContainerState</code> of the container]]>
+      </doc>
+    </method>
+    <method name="getExitStatus" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[<p>Get the <em>exit status</em> for the container.</p>
+  
+ <p>Note: This is valid only for completed containers i.e. containers
+ with state {@link ContainerState#COMPLETE}. 
+ Otherwise, it returns a ContainerExitStatus.INVALID.
+ </p>
+ 
+ <p>Containers killed by the framework, either due to being released by
+ the application or being 'lost' due to node failures etc. have a special
+ exit code of ContainerExitStatus.ABORTED.</p>
+ 
+ <p>When threshold number of the nodemanager-local-directories or
+ threshold number of the nodemanager-log-directories become bad, then
+ container is not launched and is exited with ContainerExitStatus.DISKS_FAILED.
+ </p>
+  
+ @return <em>exit status</em> for the container]]>
+      </doc>
+    </method>
+    <method name="getDiagnostics" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>diagnostic messages</em> for failed containers.
+ @return <em>diagnostic messages</em> for failed containers]]>
+      </doc>
+    </method>
+    <method name="getCapability" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>Resource</code> allocated to the container.
+ @return <code>Resource</code> allocated to the container]]>
+      </doc>
+    </method>
+    <method name="getIPs" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get all the IP addresses with which the container run.
+ @return The IP address where the container runs.]]>
+      </doc>
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the hostname where the container runs.
+ @return The hostname where the container runs.]]>
+      </doc>
+    </method>
+    <method name="getExposedPorts" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get exposed ports of the container.
+ @return List of exposed ports]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code ContainerStatus} represents the current status of a
+ {@code Container}.
+ <p>
+ It provides details such as:
+ <ul>
+   <li>{@code ContainerId} of the container.</li>
+   <li>{@code ExecutionType} of the container.</li>
+   <li>{@code ContainerState} of the container.</li>
+   <li><em>Exit status</em> of a completed container.</li>
+   <li><em>Diagnostic</em> message for a failed container.</li>
+   <li>{@link Resource} allocated to the container.</li>
+ </ul>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ContainerStatus -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ContainerSubState -->
+  <class name="ContainerSubState" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.ContainerSubState[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.ContainerSubState"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Container Sub-State.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ContainerSubState -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ContainerUpdateType -->
+  <class name="ContainerUpdateType" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.ContainerUpdateType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.ContainerUpdateType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Encodes the type of Container Update.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ContainerUpdateType -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ExecutionType -->
+  <class name="ExecutionType" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.ExecutionType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.ExecutionType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Container property encoding execution semantics.
+
+ <p>
+ The execution types are the following:
+ <ul>
+   <li>{@link #GUARANTEED} - this container is guaranteed to start its
+   execution, once the corresponding start container request is received by
+   an NM.
+   <li>{@link #OPPORTUNISTIC} - the execution of this container may not start
+   immediately at the NM that receives the corresponding start container
+   request (depending on the NM's available resources). Moreover, it may be
+   preempted if it blocks a GUARANTEED container from being executed.
+ </ul>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ExecutionType -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ExecutionTypeRequest -->
+  <class name="ExecutionTypeRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="ExecutionTypeRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ExecutionTypeRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ExecutionTypeRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="execType" type="org.apache.hadoop.yarn.api.records.ExecutionType"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ExecutionTypeRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="execType" type="org.apache.hadoop.yarn.api.records.ExecutionType"/>
+      <param name="ensureExecutionType" type="boolean"/>
+    </method>
+    <method name="setExecutionType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="execType" type="org.apache.hadoop.yarn.api.records.ExecutionType"/>
+      <doc>
+      <![CDATA[Set the <code>ExecutionType</code> of the requested container.
+
+ @param execType
+          ExecutionType of the requested container]]>
+      </doc>
+    </method>
+    <method name="getExecutionType" return="org.apache.hadoop.yarn.api.records.ExecutionType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <code>ExecutionType</code>.
+
+ @return <code>ExecutionType</code>.]]>
+      </doc>
+    </method>
+    <method name="setEnforceExecutionType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="enforceExecutionType" type="boolean"/>
+      <doc>
+      <![CDATA[Set to true to explicitly ask that the Scheduling Authority return
+ Containers of exactly the Execution Type requested.
+ @param enforceExecutionType whether ExecutionType request should be
+                            strictly honored.]]>
+      </doc>
+    </method>
+    <method name="getEnforceExecutionType" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get whether Scheduling Authority should return Containers of exactly the
+ Execution Type requested for this <code>ResourceRequest</code>.
+ Defaults to false.
+ @return whether ExecutionType request should be strictly honored]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <doc>
+    <![CDATA[An object of this class represents a specification of the execution
+ guarantee of the Containers associated with a ResourceRequest. It consists
+ of an <code>ExecutionType</code> as well as a flag that explicitly asks the
+ configured scheduler to return Containers of exactly the Execution Type
+ requested.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ExecutionTypeRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.records.FinalApplicationStatus -->
+  <class name="FinalApplicationStatus" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.FinalApplicationStatus[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Enumeration of various final states of an <code>Application</code>.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.FinalApplicationStatus -->
+  <!-- start class org.apache.hadoop.yarn.api.records.LocalizationState -->
+  <class name="LocalizationState" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.LocalizationState[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.LocalizationState"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[State of localization.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.LocalizationState -->
+  <!-- start class org.apache.hadoop.yarn.api.records.LocalizationStatus -->
+  <class name="LocalizationStatus" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="LocalizationStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.LocalizationStatus"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceKey" type="java.lang.String"/>
+      <param name="localizationState" type="org.apache.hadoop.yarn.api.records.LocalizationState"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.LocalizationStatus"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceKey" type="java.lang.String"/>
+      <param name="localizationState" type="org.apache.hadoop.yarn.api.records.LocalizationState"/>
+      <param name="diagnostics" type="java.lang.String"/>
+    </method>
+    <method name="getResourceKey" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the resource key.
+
+ @return resource key.]]>
+      </doc>
+    </method>
+    <method name="getLocalizationState" return="org.apache.hadoop.yarn.api.records.LocalizationState"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the localization state.
+
+ @return localization state.]]>
+      </doc>
+    </method>
+    <method name="getDiagnostics" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the diagnostics.
+
+ @return diagnostics.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Represents the localization status of a resource.
+ The status of the localization includes:
+ <ul>
+   <li>resource key</li>
+   <li>{@link LocalizationState} of the resource</li>
+ </ul>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.LocalizationStatus -->
+  <!-- start class org.apache.hadoop.yarn.api.records.LocalResource -->
+  <class name="LocalResource" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="LocalResource"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.LocalResource"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="org.apache.hadoop.yarn.api.records.URL"/>
+      <param name="type" type="org.apache.hadoop.yarn.api.records.LocalResourceType"/>
+      <param name="visibility" type="org.apache.hadoop.yarn.api.records.LocalResourceVisibility"/>
+      <param name="size" type="long"/>
+      <param name="timestamp" type="long"/>
+      <param name="pattern" type="java.lang.String"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.LocalResource"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="org.apache.hadoop.yarn.api.records.URL"/>
+      <param name="type" type="org.apache.hadoop.yarn.api.records.LocalResourceType"/>
+      <param name="visibility" type="org.apache.hadoop.yarn.api.records.LocalResourceVisibility"/>
+      <param name="size" type="long"/>
+      <param name="timestamp" type="long"/>
+      <param name="pattern" type="java.lang.String"/>
+      <param name="shouldBeUploadedToSharedCache" type="boolean"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.LocalResource"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="org.apache.hadoop.yarn.api.records.URL"/>
+      <param name="type" type="org.apache.hadoop.yarn.api.records.LocalResourceType"/>
+      <param name="visibility" type="org.apache.hadoop.yarn.api.records.LocalResourceVisibility"/>
+      <param name="size" type="long"/>
+      <param name="timestamp" type="long"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.LocalResource"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="org.apache.hadoop.yarn.api.records.URL"/>
+      <param name="type" type="org.apache.hadoop.yarn.api.records.LocalResourceType"/>
+      <param name="visibility" type="org.apache.hadoop.yarn.api.records.LocalResourceVisibility"/>
+      <param name="size" type="long"/>
+      <param name="timestamp" type="long"/>
+      <param name="shouldBeUploadedToSharedCache" type="boolean"/>
+    </method>
+    <method name="getResource" return="org.apache.hadoop.yarn.api.records.URL"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>location</em> of the resource to be localized.
+ @return <em>location</em> of the resource to be localized]]>
+      </doc>
+    </method>
+    <method name="setResource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.URL"/>
+      <doc>
+      <![CDATA[Set <em>location</em> of the resource to be localized.
+ @param resource <em>location</em> of the resource to be localized]]>
+      </doc>
+    </method>
+    <method name="getSize" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>size</em> of the resource to be localized.
+ @return <em>size</em> of the resource to be localized]]>
+      </doc>
+    </method>
+    <method name="setSize"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="size" type="long"/>
+      <doc>
+      <![CDATA[Set the <em>size</em> of the resource to be localized.
+ @param size <em>size</em> of the resource to be localized]]>
+      </doc>
+    </method>
+    <method name="getTimestamp" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the original <em>timestamp</em> of the resource to be localized, used
+ for verification.
+ @return <em>timestamp</em> of the resource to be localized]]>
+      </doc>
+    </method>
+    <method name="setTimestamp"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timestamp" type="long"/>
+      <doc>
+      <![CDATA[Set the <em>timestamp</em> of the resource to be localized, used
+ for verification.
+ @param timestamp <em>timestamp</em> of the resource to be localized]]>
+      </doc>
+    </method>
+    <method name="getType" return="org.apache.hadoop.yarn.api.records.LocalResourceType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>LocalResourceType</code> of the resource to be localized.
+ @return <code>LocalResourceType</code> of the resource to be localized]]>
+      </doc>
+    </method>
+    <method name="setType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="org.apache.hadoop.yarn.api.records.LocalResourceType"/>
+      <doc>
+      <![CDATA[Set the <code>LocalResourceType</code> of the resource to be localized.
+ @param type <code>LocalResourceType</code> of the resource to be localized]]>
+      </doc>
+    </method>
+    <method name="getVisibility" return="org.apache.hadoop.yarn.api.records.LocalResourceVisibility"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>LocalResourceVisibility</code> of the resource to be 
+ localized.
+ @return <code>LocalResourceVisibility</code> of the resource to be 
+         localized]]>
+      </doc>
+    </method>
+    <method name="setVisibility"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="visibility" type="org.apache.hadoop.yarn.api.records.LocalResourceVisibility"/>
+      <doc>
+      <![CDATA[Set the <code>LocalResourceVisibility</code> of the resource to be 
+ localized.
+ @param visibility <code>LocalResourceVisibility</code> of the resource to be 
+                   localized]]>
+      </doc>
+    </method>
+    <method name="getPattern" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>pattern</em> that should be used to extract entries from the
+ archive (only used when type is <code>PATTERN</code>).
+ @return <em>pattern</em> that should be used to extract entries from the 
+ archive.]]>
+      </doc>
+    </method>
+    <method name="setPattern"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pattern" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <em>pattern</em> that should be used to extract entries from the
+ archive (only used when type is <code>PATTERN</code>).
+ @param pattern <em>pattern</em> that should be used to extract entries 
+ from the archive.]]>
+      </doc>
+    </method>
+    <method name="getShouldBeUploadedToSharedCache" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[NM uses it to decide whether it is necessary to upload the resource to
+ the shared cache
+ @return true if it is necessary to upload the resource
+                 to the shared cache,
+         false otherwise]]>
+      </doc>
+    </method>
+    <method name="setShouldBeUploadedToSharedCache"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="shouldBeUploadedToSharedCache" type="boolean"/>
+      <doc>
+      <![CDATA[Inform NM whether upload to SCM is needed.
+
+ @param shouldBeUploadedToSharedCache <em>shouldBeUploadedToSharedCache</em>
+          of this request]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p><code>LocalResource</code> represents a local resource required to
+ run a container.</p>
+ 
+ <p>The <code>NodeManager</code> is responsible for localizing the resource 
+ prior to launching the container.</p>
+ 
+ <p>Applications can specify {@link LocalResourceType} and 
+ {@link LocalResourceVisibility}.</p>
+ 
+ @see LocalResourceType
+ @see LocalResourceVisibility
+ @see ContainerLaunchContext
+ @see ApplicationSubmissionContext
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.LocalResource -->
+  <!-- start class org.apache.hadoop.yarn.api.records.LocalResourceType -->
+  <class name="LocalResourceType" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.LocalResourceType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.LocalResourceType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[{@code LocalResourceType} specifies the <em>type</em>
+ of a resource localized by the {@code NodeManager}.
+ <p>
+ The <em>type</em> can be one of:
+ <ul>
+   <li>
+     {@link #FILE} - Regular file i.e. uninterpreted bytes.
+   </li>
+   <li>
+     {@link #ARCHIVE} - Archive, which is automatically unarchived by the
+     <code>NodeManager</code>.
+   </li>
+   <li>
+     {@link #PATTERN} - A hybrid between {@link #ARCHIVE} and {@link #FILE}.
+   </li>
+ </ul>
+
+ @see LocalResource
+ @see ContainerLaunchContext
+ @see ApplicationSubmissionContext
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.LocalResourceType -->
+  <!-- start class org.apache.hadoop.yarn.api.records.LocalResourceVisibility -->
+  <class name="LocalResourceVisibility" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.LocalResourceVisibility[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.LocalResourceVisibility"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[{@code LocalResourceVisibility} specifies the <em>visibility</em>
+ of a resource localized by the {@code NodeManager}.
+ <p>
+ The <em>visibility</em> can be one of:
+ <ul>
+   <li>{@link #PUBLIC} - Shared by all users on the node.</li>
+   <li>
+     {@link #PRIVATE} - Shared among all applications of the
+     <em>same user</em> on the node.
+   </li>
+   <li>
+     {@link #APPLICATION} - Shared only among containers of the
+     <em>same application</em> on the node.
+   </li>
+ </ul>
+ 
+ @see LocalResource
+ @see ContainerLaunchContext
+ @see ApplicationSubmissionContext
+ @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.LocalResourceVisibility -->
+  <!-- start class org.apache.hadoop.yarn.api.records.LogAggregationContext -->
+  <class name="LogAggregationContext" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="LogAggregationContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="includePattern" type="java.lang.String"/>
+      <param name="excludePattern" type="java.lang.String"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="includePattern" type="java.lang.String"/>
+      <param name="excludePattern" type="java.lang.String"/>
+      <param name="rolledLogsIncludePattern" type="java.lang.String"/>
+      <param name="rolledLogsExcludePattern" type="java.lang.String"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="includePattern" type="java.lang.String"/>
+      <param name="excludePattern" type="java.lang.String"/>
+      <param name="rolledLogsIncludePattern" type="java.lang.String"/>
+      <param name="rolledLogsExcludePattern" type="java.lang.String"/>
+      <param name="policyClassName" type="java.lang.String"/>
+      <param name="policyParameters" type="java.lang.String"/>
+    </method>
+    <method name="getIncludePattern" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get include pattern. This includePattern only takes effect
+ on logs that exist at the time of application finish.
+
+ @return include pattern]]>
+      </doc>
+    </method>
+    <method name="setIncludePattern"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="includePattern" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set include pattern. This includePattern only takes effect
+ on logs that exist at the time of application finish.
+
+ @param includePattern]]>
+      </doc>
+    </method>
+    <method name="getExcludePattern" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get exclude pattern. This excludePattern only takes effect
+ on logs that exist at the time of application finish.
+
+ @return exclude pattern]]>
+      </doc>
+    </method>
+    <method name="setExcludePattern"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="excludePattern" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set exclude pattern. This excludePattern only takes effect
+ on logs that exist at the time of application finish.
+
+ @param excludePattern]]>
+      </doc>
+    </method>
+    <method name="getRolledLogsIncludePattern" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get include pattern in a rolling fashion.
+ 
+ @return include pattern]]>
+      </doc>
+    </method>
+    <method name="setRolledLogsIncludePattern"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="rolledLogsIncludePattern" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set include pattern in a rolling fashion.
+ 
+ @param rolledLogsIncludePattern]]>
+      </doc>
+    </method>
+    <method name="getRolledLogsExcludePattern" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get exclude pattern for aggregation in a rolling fashion.
+ 
+ @return exclude pattern]]>
+      </doc>
+    </method>
+    <method name="setRolledLogsExcludePattern"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="rolledLogsExcludePattern" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set exclude pattern for aggregation in a rolling fashion.
+ 
+ @param rolledLogsExcludePattern]]>
+      </doc>
+    </method>
+    <method name="getLogAggregationPolicyClassName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the log aggregation policy class.
+
+ @return log aggregation policy class]]>
+      </doc>
+    </method>
+    <method name="setLogAggregationPolicyClassName"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="className" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the log aggregation policy class.
+
+ @param className]]>
+      </doc>
+    </method>
+    <method name="getLogAggregationPolicyParameters" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the log aggregation policy parameters.
+
+ @return log aggregation policy parameters]]>
+      </doc>
+    </method>
+    <method name="setLogAggregationPolicyParameters"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="parameters" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the log aggregation policy parameters.
+ There is no schema defined for the parameters string.
+ It is up to the log aggregation policy class to decide how to parse
+ the parameters string.
+
+ @param parameters]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code LogAggregationContext} represents all of the
+ information needed by the {@code NodeManager} to handle
+ the logs for an application.
+ <p>
+ It includes details such as:
+ <ul>
+   <li>
+     includePattern. It uses Java Regex to filter the log files
+     which match the defined include pattern and those log files
+     will be uploaded when the application finishes.
+   </li>
+   <li>
+     excludePattern. It uses Java Regex to filter the log files
+     which match the defined exclude pattern and those log files
+     will not be uploaded when application finishes. If the log file
+     name matches both the include and the exclude pattern, this file
+     will be excluded eventually.
+   </li>
+   <li>
+     rolledLogsIncludePattern. It uses Java Regex to filter the log files
+     which match the defined include pattern and those log files
+     will be aggregated in a rolling fashion.
+   </li>
+   <li>
+     rolledLogsExcludePattern. It uses Java Regex to filter the log files
+     which match the defined exclude pattern and those log files
+     will not be aggregated in a rolling fashion. If the log file
+     name matches both the include and the exclude pattern, this file
+     will be excluded eventually.
+   </li>
+   <li>
+     policyClassName. The policy class name that implements
+     ContainerLogAggregationPolicy. At runtime, nodemanager will use the policy
+     to decide if a given container's log should be aggregated based on the
+     ContainerType and other runtime state such as exit code by calling
+     ContainerLogAggregationPolicy#shouldDoLogAggregation.
+     This is useful when the app only wants to aggregate logs of a subset of
+     containers. Here are the available policies. Please make sure to specify
+     the canonical name by prefixing org.apache.hadoop.yarn.server.
+     nodemanager.containermanager.logaggregation.
+     to the class simple name below.
+     NoneContainerLogAggregationPolicy: skip aggregation for all containers.
+     AllContainerLogAggregationPolicy: aggregate all containers.
+     AMOrFailedContainerLogAggregationPolicy: aggregate application master
+         or failed containers.
+     FailedOrKilledContainerLogAggregationPolicy: aggregate failed or killed
+         containers
+     FailedContainerLogAggregationPolicy: aggregate failed containers
+     AMOnlyLogAggregationPolicy: aggregate application master containers
+     SampleContainerLogAggregationPolicy: sample logs of successful worker
+         containers, in addition to application master and failed/killed
+         containers.
+     LimitSizeContainerLogAggregationPolicy: skip aggregation for killed
+         containers whose log size exceeds the limit of container log size.
+     If it isn't specified, it will use the cluster-wide default policy
+     defined by configuration yarn.nodemanager.log-aggregation.policy.class.
+     The default value of yarn.nodemanager.log-aggregation.policy.class is
+     AllContainerLogAggregationPolicy.
+   </li>
+   <li>
+     policyParameters. The parameters passed to the policy class via
+     ContainerLogAggregationPolicy#parseParameters during the policy object
+     initialization. This is optional. Some policy class might use parameters
+     to adjust its settings. It is up to policy class to define the scheme of
+     parameters.
+     For example, SampleContainerLogAggregationPolicy supports the format of
+     "SR:0.5,MIN:50", which means sample rate of 50% beyond the first 50
+     successful worker containers.
+   </li>
+ </ul>
+
+ @see ApplicationSubmissionContext]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.LogAggregationContext -->
+  <!-- start class org.apache.hadoop.yarn.api.records.NMToken -->
+  <class name="NMToken" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMToken"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link NodeId} of the <code>NodeManager</code> for which the NMToken
+ is used to authenticate.
+ @return the {@link NodeId} of the <code>NodeManager</code> for which the
+ NMToken is used to authenticate.]]>
+      </doc>
+    </method>
+    <method name="setNodeId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="getToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link Token} used for authenticating with <code>NodeManager</code>
+ @return the {@link Token} used for authenticating with <code>NodeManager</code>]]>
+      </doc>
+    </method>
+    <method name="setToken"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <doc>
+    <![CDATA[<p>The NMToken is used for authenticating communication with
+ <code>NodeManager</code></p>
+ <p>It is issued by <code>ResourceManager</code> when <code>ApplicationMaster</code>
+ negotiates resource with <code>ResourceManager</code> and
+ validated on <code>NodeManager</code> side.</p>
+ @see  AllocateResponse#getNMTokens()]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.NMToken -->
+  <!-- start class org.apache.hadoop.yarn.api.records.NodeAttribute -->
+  <class name="NodeAttribute" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeAttribute"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.NodeAttribute"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributeName" type="java.lang.String"/>
+      <param name="attributeType" type="org.apache.hadoop.yarn.api.records.NodeAttributeType"/>
+      <param name="attributeValue" type="java.lang.String"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.NodeAttribute"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributePrefix" type="java.lang.String"/>
+      <param name="attributeName" type="java.lang.String"/>
+      <param name="attributeType" type="org.apache.hadoop.yarn.api.records.NodeAttributeType"/>
+      <param name="attributeValue" type="java.lang.String"/>
+    </method>
+    <method name="getAttributeKey" return="org.apache.hadoop.yarn.api.records.NodeAttributeKey"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setAttributeKey"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributeKey" type="org.apache.hadoop.yarn.api.records.NodeAttributeKey"/>
+    </method>
+    <method name="getAttributeValue" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setAttributeValue"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributeValue" type="java.lang.String"/>
+    </method>
+    <method name="getAttributeType" return="org.apache.hadoop.yarn.api.records.NodeAttributeType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setAttributeType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributeType" type="org.apache.hadoop.yarn.api.records.NodeAttributeType"/>
+    </method>
+    <field name="PREFIX_DISTRIBUTED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="PREFIX_CENTRALIZED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<p>
+ Node Attribute is a kind of a label which represents one of the
+ attribute/feature of a Node. It is different from node partition label as
+ resource guarantees across the queues will not be maintained for these type
+ of labels.
+ </p>
+ <p>
+ A given Node can be mapped with any kind of attribute, few examples are
+ HAS_SSD=true, JAVA_VERSION=JDK1.8, OS_TYPE=WINDOWS.
+ </p>
+ <p>
+ It is not compulsory for all the attributes to have value; empty string is the
+ default value of the <code>NodeAttributeType.STRING</code>
+ </p>
+ <p>
+ Node Attribute Prefix is used as namespace to segregate the attributes.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.NodeAttribute -->
+  <!-- start class org.apache.hadoop.yarn.api.records.NodeAttributeInfo -->
+  <class name="NodeAttributeInfo" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeAttributeInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.NodeAttributeInfo"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAttribute" type="org.apache.hadoop.yarn.api.records.NodeAttribute"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.NodeAttributeInfo"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAttributeKey" type="org.apache.hadoop.yarn.api.records.NodeAttributeKey"/>
+      <param name="attributeType" type="org.apache.hadoop.yarn.api.records.NodeAttributeType"/>
+    </method>
+    <method name="getAttributeKey" return="org.apache.hadoop.yarn.api.records.NodeAttributeKey"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setAttributeKey"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributeKey" type="org.apache.hadoop.yarn.api.records.NodeAttributeKey"/>
+    </method>
+    <method name="getAttributeType" return="org.apache.hadoop.yarn.api.records.NodeAttributeType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setAttributeType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributeType" type="org.apache.hadoop.yarn.api.records.NodeAttributeType"/>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ Node Attribute Info describes a NodeAttribute.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.NodeAttributeInfo -->
+  <!-- start class org.apache.hadoop.yarn.api.records.NodeAttributeKey -->
+  <class name="NodeAttributeKey" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeAttributeKey"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.NodeAttributeKey"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributeName" type="java.lang.String"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.NodeAttributeKey"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributePrefix" type="java.lang.String"/>
+      <param name="attributeName" type="java.lang.String"/>
+    </method>
+    <method name="getAttributePrefix" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setAttributePrefix"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributePrefix" type="java.lang.String"/>
+    </method>
+    <method name="getAttributeName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setAttributeName"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributeName" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ Node AttributeKey uniquely identifies a given Node Attribute. Node Attribute
+ is identified based on attribute prefix and name.
+ </p>
+ <p>
+ Node Attribute Prefix is used as namespace to segregate the attributes.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.NodeAttributeKey -->
+  <!-- start class org.apache.hadoop.yarn.api.records.NodeAttributeOpCode -->
+  <class name="NodeAttributeOpCode" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.NodeAttributeOpCode[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.NodeAttributeOpCode"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Enumeration of various node attribute op codes.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.NodeAttributeOpCode -->
+  <!-- start class org.apache.hadoop.yarn.api.records.NodeAttributeType -->
+  <class name="NodeAttributeType" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.NodeAttributeType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.NodeAttributeType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ Type of a <code>node Attribute</code>.
+ </p>
+ Based on this attribute expressions and values will be evaluated.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.NodeAttributeType -->
+  <!-- start class org.apache.hadoop.yarn.api.records.NodeId -->
+  <class name="NodeId" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="NodeId"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="host" type="java.lang.String"/>
+      <param name="port" type="int"/>
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>hostname</em> of the node.
+ @return <em>hostname</em> of the node]]>
+      </doc>
+    </method>
+    <method name="getPort" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>port</em> for communicating with the node.
+ @return <em>port</em> for communicating with the node]]>
+      </doc>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="fromString" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeIdStr" type="java.lang.String"/>
+    </method>
+    <method name="build"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[<p><code>NodeId</code> is the unique identifier for a node.</p>
+ 
+ <p>It includes the <em>hostname</em> and <em>port</em> to uniquely 
+ identify the node. Thus, it is unique across restarts of any 
+ <code>NodeManager</code>.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.NodeId -->
+  <!-- start class org.apache.hadoop.yarn.api.records.NodeLabel -->
+  <class name="NodeLabel" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="NodeLabel"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isExclusive" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.NodeLabel"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="DEFAULT_NODE_LABEL_PARTITION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default node label partition used for displaying.]]>
+      </doc>
+    </field>
+    <field name="NODE_LABEL_EXPRESSION_NOT_SET" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Node Label expression not set .]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NODE_LABEL_EXCLUSIVITY" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[By default, node label is exclusive or not]]>
+      </doc>
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.NodeLabel -->
+  <!-- start class org.apache.hadoop.yarn.api.records.NodeReport -->
+  <class name="NodeReport" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>NodeId</code> of the node.
+ @return <code>NodeId</code> of the node]]>
+      </doc>
+    </method>
+    <method name="getNodeState" return="org.apache.hadoop.yarn.api.records.NodeState"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>NodeState</code> of the node.
+ @return <code>NodeState</code> of the node]]>
+      </doc>
+    </method>
+    <method name="getHttpAddress" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>http address</em> of the node.
+ @return <em>http address</em> of the node]]>
+      </doc>
+    </method>
+    <method name="getRackName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>rack name</em> for the node.
+ @return <em>rack name</em> for the node]]>
+      </doc>
+    </method>
+    <method name="getUsed" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>used</em> <code>Resource</code> on the node.
+ @return <em>used</em> <code>Resource</code> on the node]]>
+      </doc>
+    </method>
+    <method name="getCapability" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>total</em> <code>Resource</code> on the node.
+ @return <em>total</em> <code>Resource</code> on the node]]>
+      </doc>
+    </method>
+    <method name="getHealthReport" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>diagnostic health report</em> of the node.
+ @return <em>diagnostic health report</em> of the node]]>
+      </doc>
+    </method>
+    <method name="getLastHealthReportTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>last timestamp</em> at which the health report was received.
+ @return <em>last timestamp</em> at which the health report was received]]>
+      </doc>
+    </method>
+    <method name="getNodeLabels" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get labels of this node.
+ @return labels of this node.]]>
+      </doc>
+    </method>
+    <method name="getAggregatedContainersUtilization" return="org.apache.hadoop.yarn.api.records.ResourceUtilization"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get containers aggregated resource utilization in a node.
+ @return containers resource utilization.]]>
+      </doc>
+    </method>
+    <method name="getNodeUtilization" return="org.apache.hadoop.yarn.api.records.ResourceUtilization"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get node resource utilization.
+ @return node resource utilization.]]>
+      </doc>
+    </method>
+    <method name="getDecommissioningTimeout" return="java.lang.Integer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Optional decommissioning timeout in seconds (null indicates absent
+ timeout).
+ @return the decommissioning timeout in second.]]>
+      </doc>
+    </method>
+    <method name="setDecommissioningTimeout"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="decommissioningTimeout" type="java.lang.Integer"/>
+      <doc>
+      <![CDATA[Set the decommissioning timeout in seconds (null indicates absent timeout).]]>
+      </doc>
+    </method>
+    <method name="getNodeUpdateType" return="org.apache.hadoop.yarn.api.records.NodeUpdateType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Optional node update type (null indicates absent update type).
+ @return the node update.]]>
+      </doc>
+    </method>
+    <method name="setNodeUpdateType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeUpdateType" type="org.apache.hadoop.yarn.api.records.NodeUpdateType"/>
+      <doc>
+      <![CDATA[Set the node update type (null indicates absent node update type).]]>
+      </doc>
+    </method>
+    <method name="setNodeAttributes"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAttributes" type="java.util.Set"/>
+      <doc>
+      <![CDATA[Set the node attributes of node.
+
+ @param nodeAttributes set of node attributes.]]>
+      </doc>
+    </method>
+    <method name="getNodeAttributes" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get node attributes of node.
+ @return the set of node attributes.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code NodeReport} is a summary of runtime information of a node
+ in the cluster.
+ <p>
+ It includes details such as:
+ <ul>
+   <li>{@link NodeId} of the node.</li>
+   <li>HTTP Tracking URL of the node.</li>
+   <li>Rack name for the node.</li>
+   <li>Used {@link Resource} on the node.</li>
+   <li>Total available {@link Resource} of the node.</li>
+   <li>Number of running containers on the node.</li>
+ </ul>
+
+ @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.NodeReport -->
+  <!-- start class org.apache.hadoop.yarn.api.records.NodeState -->
+  <class name="NodeState" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.NodeState[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.NodeState"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="isUnusable" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isInactiveState" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isActiveState" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[<p>State of a <code>Node</code>.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.NodeState -->
+  <!-- start class org.apache.hadoop.yarn.api.records.NodeToAttributeValue -->
+  <class name="NodeToAttributeValue" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeToAttributeValue"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.NodeToAttributeValue"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hostname" type="java.lang.String"/>
+      <param name="attributeValue" type="java.lang.String"/>
+    </method>
+    <method name="getAttributeValue" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setAttributeValue"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributeValue" type="java.lang.String"/>
+    </method>
+    <method name="getHostname" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setHostname"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hostname" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ Mapping of Attribute Value to a Node.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.NodeToAttributeValue -->
+  <!-- start class org.apache.hadoop.yarn.api.records.PreemptionContainer -->
+  <class name="PreemptionContainer" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="PreemptionContainer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Container referenced by this handle.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Specific container requested back by the <code>ResourceManager</code>.
+ @see PreemptionContract
+ @see StrictPreemptionContract]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.PreemptionContainer -->
+  <!-- start class org.apache.hadoop.yarn.api.records.PreemptionContract -->
+  <class name="PreemptionContract" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="PreemptionContract"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getResourceRequest" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If the AM releases resources matching these requests, then the {@link
+ PreemptionContainer}s enumerated in {@link #getContainers()} should not be
+ evicted from the cluster. Due to delays in propagating cluster state and
+ sending these messages, there are conditions where satisfied contracts may
+ not prevent the platform from killing containers.
+ @return List of {@link PreemptionResourceRequest} to update the
+ <code>ApplicationMaster</code> about resources requested back by the
+ <code>ResourceManager</code>.
+ @see AllocateRequest#setAskList(List)]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Assign the set of {@link PreemptionContainer} specifying which containers
+ owned by the <code>ApplicationMaster</code> that may be reclaimed by the
+ <code>ResourceManager</code>. If the AM prefers a different set of
+ containers, then it may checkpoint or kill containers matching the
+ description in {@link #getResourceRequest}.
+ @return Set of containers at risk if the contract is not met.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Description of resources requested back by the <code>ResourceManager</code>.
+ The <code>ApplicationMaster</code> (AM) can satisfy this request according
+ to its own priorities to prevent containers from being forcibly killed by
+ the platform.
+ @see PreemptionMessage]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.PreemptionContract -->
+  <!-- start class org.apache.hadoop.yarn.api.records.PreemptionMessage -->
+  <class name="PreemptionMessage" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="PreemptionMessage"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getStrictContract" return="org.apache.hadoop.yarn.api.records.StrictPreemptionContract"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Specific resources that may be killed by the
+ <code>ResourceManager</code>]]>
+      </doc>
+    </method>
+    <method name="getContract" return="org.apache.hadoop.yarn.api.records.PreemptionContract"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Contract describing resources to return to the cluster.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A {@link PreemptionMessage} is part of the RM-AM protocol, and it is used by
+ the RM to specify resources that the RM wants to reclaim from this
+ {@code ApplicationMaster} (AM). The AM receives a {@link
+ StrictPreemptionContract} message encoding which containers the platform may
+ forcibly kill, granting it an opportunity to checkpoint state or adjust its
+ execution plan. The message may also include a {@link PreemptionContract}
+ granting the AM more latitude in selecting which resources to return to the
+ cluster.
+ <p>
+ The AM should decode both parts of the message. The {@link
+ StrictPreemptionContract} specifies particular allocations that the RM
+ requires back. The AM can checkpoint containers' state, adjust its execution
+ plan to move the computation, or take no action and hope that conditions that
+ caused the RM to ask for the container will change.
+ <p>
+ In contrast, the {@link PreemptionContract} also includes a description of
+ resources with a set of containers. If the AM releases containers matching
+ that profile, then the containers enumerated in {@link
+ PreemptionContract#getContainers()} may not be killed.
+ <p>
+ Each preemption message reflects the RM's current understanding of the
+ cluster state, so a request to return <em>N</em> containers may not
+ reflect containers the AM is releasing, recently exited containers the RM has
+ yet to learn about, or new containers allocated before the message was
+ generated. Conversely, an RM may request a different profile of containers in
+ subsequent requests.
+ <p>
+ The policy enforced by the RM is part of the scheduler. Generally, only
+ containers that have been requested consistently should be killed, but the
+ details are not specified.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.PreemptionMessage -->
+  <!-- start class org.apache.hadoop.yarn.api.records.PreemptionResourceRequest -->
+  <class name="PreemptionResourceRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="PreemptionResourceRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getResourceRequest" return="org.apache.hadoop.yarn.api.records.ResourceRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return Resource described in this request, to be matched against running
+ containers.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Description of resources requested back by the cluster.
+ @see PreemptionContract
+ @see AllocateRequest#setAskList(java.util.List)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.PreemptionResourceRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.records.Priority -->
+  <class name="Priority" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="Priority"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="p" type="int"/>
+    </method>
+    <method name="getPriority" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the assigned priority
+ @return the assigned priority]]>
+      </doc>
+    </method>
+    <method name="setPriority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="int"/>
+      <doc>
+      <![CDATA[Set the assigned priority
+ @param priority the assigned priority]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.Priority"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="UNDEFINED" type="org.apache.hadoop.yarn.api.records.Priority"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[The priority assigned to a ResourceRequest or Application or Container 
+ allocation]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.Priority -->
+  <!-- start class org.apache.hadoop.yarn.api.records.QueueACL -->
+  <class name="QueueACL" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.QueueACL[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.QueueACL"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[{@code QueueACL} enumerates the various ACLs for queues.
+ <p>
+ The ACL is one of:
+ <ul>
+   <li>
+     {@link #SUBMIT_APPLICATIONS} - ACL to submit applications to the queue.
+   </li>
+   <li>{@link #ADMINISTER_QUEUE} - ACL to administer the queue.</li>
+ </ul>
+ 
+ @see QueueInfo
+ @see ApplicationClientProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.QueueACL -->
+  <!-- start class org.apache.hadoop.yarn.api.records.QueueInfo -->
+  <class name="QueueInfo" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="QueueInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getQueueName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>name</em> of the queue.
+ @return <em>name</em> of the queue]]>
+      </doc>
+    </method>
+    <method name="getCapacity" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>configured capacity</em> of the queue.
+ @return <em>configured capacity</em> of the queue]]>
+      </doc>
+    </method>
+    <method name="getMaximumCapacity" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>maximum capacity</em> of the queue.
+ @return <em>maximum capacity</em> of the queue]]>
+      </doc>
+    </method>
+    <method name="getCurrentCapacity" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>current capacity</em> of the queue.
+ @return <em>current capacity</em> of the queue]]>
+      </doc>
+    </method>
+    <method name="getChildQueues" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>child queues</em> of the queue.
+ @return <em>child queues</em> of the queue]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>running applications</em> of the queue.
+ @return <em>running applications</em> of the queue]]>
+      </doc>
+    </method>
+    <method name="getQueueState" return="org.apache.hadoop.yarn.api.records.QueueState"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>QueueState</code> of the queue.
+ @return <code>QueueState</code> of the queue]]>
+      </doc>
+    </method>
+    <method name="getAccessibleNodeLabels" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>accessible node labels</code> of the queue.
+ @return <code>accessible node labels</code> of the queue]]>
+      </doc>
+    </method>
+    <method name="getDefaultNodeLabelExpression" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>default node label expression</code> of the queue, this takes
+ affect only when the <code>ApplicationSubmissionContext</code> and
+ <code>ResourceRequest</code> don't specify their
+ <code>NodeLabelExpression</code>.
+ 
+ @return <code>default node label expression</code> of the queue]]>
+      </doc>
+    </method>
+    <method name="setDefaultNodeLabelExpression"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="defaultLabelExpression" type="java.lang.String"/>
+    </method>
+    <method name="getQueueStatistics" return="org.apache.hadoop.yarn.api.records.QueueStatistics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>queue stats</code> for the queue
+
+ @return <code>queue stats</code> of the queue]]>
+      </doc>
+    </method>
+    <method name="setQueueStatistics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueStatistics" type="org.apache.hadoop.yarn.api.records.QueueStatistics"/>
+      <doc>
+      <![CDATA[Set the queue statistics for the queue
+ 
+ @param queueStatistics
+          the queue statistics]]>
+      </doc>
+    </method>
+    <method name="getPreemptionDisabled" return="java.lang.Boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>preemption status</em> of the queue.
+ @return if property is not in proto, return null;
+        otherwise, return <em>preemption status</em> of the queue]]>
+      </doc>
+    </method>
+    <method name="getQueueConfigurations" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the per-node-label queue configurations of the queue.
+
+ @return the per-node-label queue configurations of the queue.]]>
+      </doc>
+    </method>
+    <method name="getIntraQueuePreemptionDisabled" return="java.lang.Boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the intra-queue preemption status of the queue.
+ @return if property is not in proto, return null;
+        otherwise, return intra-queue preemption status of the queue]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[QueueInfo is a report of the runtime information of the queue.
+ <p>
+ It includes information such as:
+ <ul>
+   <li>Queue name.</li>
+   <li>Capacity of the queue.</li>
+   <li>Maximum capacity of the queue.</li>
+   <li>Current capacity of the queue.</li>
+   <li>Child queues.</li>
+   <li>Running applications.</li>
+   <li>{@link QueueState} of the queue.</li>
+   <li>{@link QueueConfigurations} of the queue.</li>
+ </ul>
+
+ @see QueueState
+ @see QueueConfigurations
+ @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.QueueInfo -->
+  <!-- start class org.apache.hadoop.yarn.api.records.QueueState -->
+  <class name="QueueState" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.QueueState[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.QueueState"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[State of a Queue.
+ <p>
+ A queue is in one of:
+ <ul>
+   <li>{@link #RUNNING} - normal state.</li>
+   <li>{@link #STOPPED} - not accepting new application submissions.</li>
+   <li>
+     {@link #DRAINING} - not accepting new application submissions
+     and waiting for applications to finish.
+   </li>
+ </ul>
+ 
+ @see QueueInfo
+ @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.QueueState -->
+  <!-- start class org.apache.hadoop.yarn.api.records.QueueStatistics -->
+  <class name="QueueStatistics" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="QueueStatistics"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNumAppsSubmitted" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of apps submitted
+ 
+ @return the number of apps submitted]]>
+      </doc>
+    </method>
+    <method name="setNumAppsSubmitted"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numAppsSubmitted" type="long"/>
+      <doc>
+      <![CDATA[Set the number of apps submitted
+ 
+ @param numAppsSubmitted
+          the number of apps submitted]]>
+      </doc>
+    </method>
+    <method name="getNumAppsRunning" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of running apps
+ 
+ @return the number of running apps]]>
+      </doc>
+    </method>
+    <method name="setNumAppsRunning"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numAppsRunning" type="long"/>
+      <doc>
+      <![CDATA[Set the number of running apps
+ 
+ @param numAppsRunning
+          the number of running apps]]>
+      </doc>
+    </method>
+    <method name="getNumAppsPending" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of pending apps
+ 
+ @return the number of pending apps]]>
+      </doc>
+    </method>
+    <method name="setNumAppsPending"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numAppsPending" type="long"/>
+      <doc>
+      <![CDATA[Set the number of pending apps
+ 
+ @param numAppsPending
+          the number of pending apps]]>
+      </doc>
+    </method>
+    <method name="getNumAppsCompleted" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of completed apps
+ 
+ @return the number of completed apps]]>
+      </doc>
+    </method>
+    <method name="setNumAppsCompleted"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numAppsCompleted" type="long"/>
+      <doc>
+      <![CDATA[Set the number of completed apps
+ 
+ @param numAppsCompleted
+          the number of completed apps]]>
+      </doc>
+    </method>
+    <method name="getNumAppsKilled" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of killed apps
+ 
+ @return the number of killed apps]]>
+      </doc>
+    </method>
+    <method name="setNumAppsKilled"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numAppsKilled" type="long"/>
+      <doc>
+      <![CDATA[Set the number of killed apps
+ 
+ @param numAppsKilled
+          the number of killed apps]]>
+      </doc>
+    </method>
+    <method name="getNumAppsFailed" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of failed apps
+ 
+ @return the number of failed apps]]>
+      </doc>
+    </method>
+    <method name="setNumAppsFailed"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numAppsFailed" type="long"/>
+      <doc>
+      <![CDATA[Set the number of failed apps
+ 
+ @param numAppsFailed
+          the number of failed apps]]>
+      </doc>
+    </method>
+    <method name="getNumActiveUsers" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of active users
+ 
+ @return the number of active users]]>
+      </doc>
+    </method>
+    <method name="setNumActiveUsers"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numActiveUsers" type="long"/>
+      <doc>
+      <![CDATA[Set the number of active users
+ 
+ @param numActiveUsers
+          the number of active users]]>
+      </doc>
+    </method>
+    <method name="getAvailableMemoryMB" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the available memory in MB
+ 
+ @return the available memory]]>
+      </doc>
+    </method>
+    <method name="setAvailableMemoryMB"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="availableMemoryMB" type="long"/>
+      <doc>
+      <![CDATA[Set the available memory in MB
+ 
+ @param availableMemoryMB
+          the available memory]]>
+      </doc>
+    </method>
+    <method name="getAllocatedMemoryMB" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the allocated memory in MB
+ 
+ @return the allocated memory]]>
+      </doc>
+    </method>
+    <method name="setAllocatedMemoryMB"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allocatedMemoryMB" type="long"/>
+      <doc>
+      <![CDATA[Set the allocated memory in MB
+ 
+ @param allocatedMemoryMB
+          the allocated memory]]>
+      </doc>
+    </method>
+    <method name="getPendingMemoryMB" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the pending memory in MB
+ 
+ @return the pending memory]]>
+      </doc>
+    </method>
+    <method name="setPendingMemoryMB"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pendingMemoryMB" type="long"/>
+      <doc>
+      <![CDATA[Set the pending memory in MB
+ 
+ @param pendingMemoryMB
+          the pending memory]]>
+      </doc>
+    </method>
+    <method name="getReservedMemoryMB" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reserved memory in MB
+ 
+ @return the reserved memory]]>
+      </doc>
+    </method>
+    <method name="setReservedMemoryMB"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservedMemoryMB" type="long"/>
+      <doc>
+      <![CDATA[Set the reserved memory in MB
+ 
+ @param reservedMemoryMB
+          the reserved memory]]>
+      </doc>
+    </method>
+    <method name="getAvailableVCores" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the available vcores
+ 
+ @return the available vcores]]>
+      </doc>
+    </method>
+    <method name="setAvailableVCores"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="availableVCores" type="long"/>
+      <doc>
+      <![CDATA[Set the available vcores
+ 
+ @param availableVCores
+          the available vcores]]>
+      </doc>
+    </method>
+    <method name="getAllocatedVCores" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the allocated vcores
+ 
+ @return the allocated vcores]]>
+      </doc>
+    </method>
+    <method name="setAllocatedVCores"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allocatedVCores" type="long"/>
+      <doc>
+      <![CDATA[Set the allocated vcores
+ 
+ @param allocatedVCores
+          the allocated vcores]]>
+      </doc>
+    </method>
+    <method name="getPendingVCores" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the pending vcores
+ 
+ @return the pending vcores]]>
+      </doc>
+    </method>
+    <method name="setPendingVCores"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pendingVCores" type="long"/>
+      <doc>
+      <![CDATA[Set the pending vcores
+ 
+ @param pendingVCores
+          the pending vcores]]>
+      </doc>
+    </method>
+    <method name="getPendingContainers" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of pending containers.
+ @return the number of pending containers.]]>
+      </doc>
+    </method>
+    <method name="setPendingContainers"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pendingContainers" type="long"/>
+      <doc>
+      <![CDATA[Set the number of pending containers.
+ @param pendingContainers the pending containers.]]>
+      </doc>
+    </method>
+    <method name="getAllocatedContainers" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of allocated containers.
+ @return the number of allocated containers.]]>
+      </doc>
+    </method>
+    <method name="setAllocatedContainers"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allocatedContainers" type="long"/>
+      <doc>
+      <![CDATA[Set the number of allocated containers.
+ @param allocatedContainers the allocated containers.]]>
+      </doc>
+    </method>
+    <method name="getReservedContainers" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of reserved containers.
+ @return the number of reserved containers.]]>
+      </doc>
+    </method>
+    <method name="setReservedContainers"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservedContainers" type="long"/>
+      <doc>
+      <![CDATA[Set the number of reserved containers.
+ @param reservedContainers the reserved containers.]]>
+      </doc>
+    </method>
+    <method name="getReservedVCores" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reserved vcores
+ 
+ @return the reserved vcores]]>
+      </doc>
+    </method>
+    <method name="setReservedVCores"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservedVCores" type="long"/>
+      <doc>
+      <![CDATA[Set the reserved vcores
+ 
+ @param reservedVCores
+          the reserved vcores]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.QueueStatistics -->
+  <!-- start class org.apache.hadoop.yarn.api.records.QueueUserACLInfo -->
+  <class name="QueueUserACLInfo" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="QueueUserACLInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getQueueName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>queue name</em> of the queue.
+ @return <em>queue name</em> of the queue]]>
+      </doc>
+    </method>
+    <method name="getUserAcls" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of <code>QueueACL</code> for the given user.
+ @return list of <code>QueueACL</code> for the given user]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p><code>QueueUserACLInfo</code> provides information {@link QueueACL} for
+ the given user.</p>
+ 
+ @see QueueACL
+ @see ApplicationClientProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.QueueUserACLInfo -->
+  <!-- start class org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest -->
+  <class name="RejectedSchedulingRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RejectedSchedulingRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reason" type="org.apache.hadoop.yarn.api.records.RejectionReason"/>
+      <param name="request" type="org.apache.hadoop.yarn.api.records.SchedulingRequest"/>
+      <doc>
+      <![CDATA[Create new RejectedSchedulingRequest.
+ @param reason Rejection Reason.
+ @param request Rejected Scheduling Request.
+ @return RejectedSchedulingRequest.]]>
+      </doc>
+    </method>
+    <method name="getReason" return="org.apache.hadoop.yarn.api.records.RejectionReason"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get Rejection Reason.
+ @return Rejection reason.]]>
+      </doc>
+    </method>
+    <method name="setReason"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reason" type="org.apache.hadoop.yarn.api.records.RejectionReason"/>
+      <doc>
+      <![CDATA[Set Rejection Reason.
+ @param reason Rejection Reason.]]>
+      </doc>
+    </method>
+    <method name="getRequest" return="org.apache.hadoop.yarn.api.records.SchedulingRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the Rejected Scheduling Request.
+ @return SchedulingRequest.]]>
+      </doc>
+    </method>
+    <method name="setRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.records.SchedulingRequest"/>
+      <doc>
+      <![CDATA[Set the SchedulingRequest.
+ @param request SchedulingRequest.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This encapsulates a Rejected SchedulingRequest. It contains the offending
+ Scheduling Request along with the reason for rejection.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.records.RejectionReason -->
+  <class name="RejectionReason" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.RejectionReason[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.RejectionReason"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Reason for rejecting a Scheduling Request.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.RejectionReason -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ReservationACL -->
+  <class name="ReservationACL" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.ReservationACL[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.ReservationACL"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[{@code ReservationACL} enumerates the various ACLs for reservations.
+ <p>
+ The ACL is one of:
+ <ul>
+   <li>
+     {@link #ADMINISTER_RESERVATIONS} - ACL to create, list, update and
+     delete reservations.
+   </li>
+   <li> {@link #LIST_RESERVATIONS} - ACL to list reservations. </li>
+   <li> {@link #SUBMIT_RESERVATIONS} - ACL to create reservations. </li>
+ </ul>
+ Users can always list, update and delete their own reservations.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ReservationACL -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ReservationAllocationState -->
+  <class name="ReservationAllocationState" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReservationAllocationState"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ReservationAllocationState"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="acceptanceTime" type="long"/>
+      <param name="user" type="java.lang.String"/>
+      <param name="resourceAllocations" type="java.util.List"/>
+      <param name="reservationId" type="org.apache.hadoop.yarn.api.records.ReservationId"/>
+      <param name="reservationDefinition" type="org.apache.hadoop.yarn.api.records.ReservationDefinition"/>
+      <doc>
+      <![CDATA[@param acceptanceTime The acceptance time of the reservation.
+ @param user The username of the user who made the reservation.
+ @param resourceAllocations List of {@link ResourceAllocationRequest}
+                            representing the current state of the
+                            reservation resource allocations. This is
+                            subject to change in the event of re-planning.
+ @param reservationId {@link ReservationId } of the reservation being
+                                            listed.
+ @param reservationDefinition {@link ReservationDefinition} used to make
+                              the reservation.
+ @return {@code ReservationAllocationState} that represents the state of
+ the reservation.]]>
+      </doc>
+    </method>
+    <method name="getAcceptanceTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the acceptance time of the reservation.
+
+ @return the time that the reservation was accepted.]]>
+      </doc>
+    </method>
+    <method name="getUser" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user who made the reservation.
+
+ @return the name of the user who made the reservation.]]>
+      </doc>
+    </method>
+    <method name="getResourceAllocationRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the Resource allocations of the reservation based on the current state
+ of the plan. This is subject to change in the event of re-planning.
+ The allocations will be constrained to the user contract as described by
+ the {@link ReservationDefinition}
+
+ @return a list of resource allocations for the reservation.]]>
+      </doc>
+    </method>
+    <method name="getReservationId" return="org.apache.hadoop.yarn.api.records.ReservationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the id of the reservation.
+
+ @return the reservation id corresponding to the reservation.]]>
+      </doc>
+    </method>
+    <method name="getReservationDefinition" return="org.apache.hadoop.yarn.api.records.ReservationDefinition"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reservation definition used to make the reservation.
+
+ @return the reservation definition used to make the reservation.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code ReservationAllocationState} represents the reservation that is
+ made by a user.
+ <p>
+ It includes:
+ <ul>
+   <li>Duration of the reservation.</li>
+   <li>Acceptance time of the duration.</li>
+   <li>
+       List of {@link ResourceAllocationRequest}, which includes the time
+       interval, and capability of the allocation.
+       {@code ResourceAllocationRequest} represents an allocation
+       made for a reservation for the current state of the queue. This can be
+       changed for reasons such as re-planning, but will always be subject to
+       the constraints of the user contract as described by
+       {@link ReservationDefinition}
+   </li>
+   <li>{@link ReservationId} of the reservation.</li>
+   <li>{@link ReservationDefinition} used to make the reservation.</li>
+ </ul>
+
+ @see ResourceAllocationRequest
+ @see ReservationId
+ @see ReservationDefinition]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ReservationAllocationState -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ReservationDefinition -->
+  <class name="ReservationDefinition" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReservationDefinition"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ReservationDefinition"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="arrival" type="long"/>
+      <param name="deadline" type="long"/>
+      <param name="reservationRequests" type="org.apache.hadoop.yarn.api.records.ReservationRequests"/>
+      <param name="name" type="java.lang.String"/>
+      <param name="recurrenceExpression" type="java.lang.String"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ReservationDefinition"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="arrival" type="long"/>
+      <param name="deadline" type="long"/>
+      <param name="reservationRequests" type="org.apache.hadoop.yarn.api.records.ReservationRequests"/>
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="getArrival" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the arrival time or the earliest time from which the resource(s) can be
+ allocated. Time expressed as UTC.
+ 
+ @return the earliest valid time for this reservation]]>
+      </doc>
+    </method>
+    <method name="setArrival"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="earliestStartTime" type="long"/>
+      <doc>
+      <![CDATA[Set the arrival time or the earliest time from which the resource(s) can be
+ allocated. Time expressed as UTC.
+ 
+ @param earliestStartTime the earliest valid time for this reservation]]>
+      </doc>
+    </method>
+    <method name="getDeadline" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the deadline or the latest time by when the resource(s) must be
+ allocated. Time expressed as UTC.
+ 
+ @return the deadline or the latest time by when the resource(s) must be
+         allocated]]>
+      </doc>
+    </method>
+    <method name="setDeadline"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="latestEndTime" type="long"/>
+      <doc>
+      <![CDATA[Set the deadline or the latest time by when the resource(s) must be
+ allocated. Time expressed as UTC.
+ 
+ @param latestEndTime the deadline or the latest time by when the
+          resource(s) should be allocated]]>
+      </doc>
+    </method>
+    <method name="getReservationRequests" return="org.apache.hadoop.yarn.api.records.ReservationRequests"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of {@link ReservationRequests} representing the resources
+ required by the application
+ 
+ @return the list of {@link ReservationRequests}]]>
+      </doc>
+    </method>
+    <method name="setReservationRequests"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationRequests" type="org.apache.hadoop.yarn.api.records.ReservationRequests"/>
+      <doc>
+      <![CDATA[Set the list of {@link ReservationRequests} representing the resources
+ required by the application
+ 
+ @param reservationRequests the list of {@link ReservationRequests}]]>
+      </doc>
+    </method>
+    <method name="getReservationName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the name for this reservation. The name need not be unique, and it is
+ just a mnemonic for the user (akin to job names). Accepted reservations are
+ uniquely identified by a system-generated ReservationId.
+ 
+ @return string representing the name of the corresponding reserved resource
+         allocation in the scheduler]]>
+      </doc>
+    </method>
+    <method name="setReservationName"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the name for this reservation. The name need-not be unique, and it is
+ just a mnemonic for the user (akin to job names). Accepted reservations are
+ uniquely identified by a system-generated ReservationId.
+ 
+ @param name representing the name of the corresponding reserved resource
+          allocation in the scheduler]]>
+      </doc>
+    </method>
+    <method name="getRecurrenceExpression" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the recurrence of this reservation representing the time period of
+ the periodic job. Currently, only long values are supported. Later,
+ support for regular expressions denoting arbitrary recurrence patterns
+ (e.g., every Tuesday and Thursday) will be added.
+ Recurrence is represented in milliseconds for periodic jobs.
+ Recurrence is 0 for non-periodic jobs. Periodic jobs are valid until they
+ are explicitly cancelled and have higher priority than non-periodic jobs
+ (during initial placement and replanning). Periodic job allocations are
+ consistent across runs (flexibility in allocation is leveraged only during
+ initial placement, allocations remain consistent thereafter). Note that
+ as a long, the recurrence expression must be greater than the duration of
+ the reservation (deadline - arrival). Also note that the configured max
+ period must be divisible by the recurrence expression if expressed as a
+ long.
+
+ @return recurrence of this reservation]]>
+      </doc>
+    </method>
+    <method name="setRecurrenceExpression"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="recurrenceExpression" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the recurrence of this reservation representing the time period of
+ the periodic job. Currently, only long values are supported. Later,
+ support for regular expressions denoting arbitrary recurrence patterns
+ (e.g., every Tuesday and Thursday) will be added.
+ Recurrence is represented in milliseconds for periodic jobs.
+ Recurrence is 0 for non-periodic jobs. Periodic jobs are valid until they
+ are explicitly cancelled and have higher priority than non-periodic jobs
+ (during initial placement and replanning). Periodic job allocations are
+ consistent across runs (flexibility in allocation is leveraged only during
+ initial placement, allocations remain consistent thereafter). Note that
+ as a long, the recurrence expression must be greater than the duration of
+ the reservation (deadline - arrival). Also note that the configured max
+ period must be divisible by the recurrence expression if expressed as a
+ long.
+
+ @param recurrenceExpression recurrence interval of this reservation]]>
+      </doc>
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the priority for this reservation. A lower number for priority
+ indicates a higher priority reservation. Recurring reservations are
+ always higher priority than non-recurring reservations. Priority for
+ non-recurring reservations are only compared with non-recurring
+ reservations. Likewise for recurring reservations.
+
+ @return the priority of the reserved resource
+         allocation in the scheduler
+      </doc>
+    </method>
+    <method name="setPriority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <doc>
+      <![CDATA[Set the priority for this reservation. A lower number for priority
+ indicates a higher priority reservation. Recurring reservations are
+ always higher priority than non-recurring reservations. Priority for
+ non-recurring reservations are only compared with non-recurring
+ reservations. Likewise for recurring reservations.
+
+ @param priority representing the priority of the reserved resource
+          allocation in the scheduler]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@link ReservationDefinition} captures the set of resource and time
+ constraints the user cares about regarding a reservation.
+ 
+ @see ResourceRequest]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ReservationDefinition -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ReservationId -->
+  <class name="ReservationId" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="ReservationId"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getId" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the long identifier of the {@link ReservationId} which is unique for
+ all Reservations started by a particular instance of the
+ {@code ResourceManager}.
+
+ @return long identifier of the {@link ReservationId}]]>
+      </doc>
+    </method>
+    <method name="getClusterTimestamp" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>start time</em> of the {@code ResourceManager} which is used to
+ generate globally unique {@link ReservationId}.
+
+ @return <em>start time</em> of the {@code ResourceManager}]]>
+      </doc>
+    </method>
+    <method name="build"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.ReservationId"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="parseReservationId" return="org.apache.hadoop.yarn.api.records.ReservationId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationId" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Parse the string argument as a {@link ReservationId}
+
+ @param reservationId the string representation of the {@link ReservationId}
+ @return the {@link ReservationId} corresponding to the input string if
+         valid, null if input is null
+ @throws IOException if unable to parse the input string]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <field name="reserveIdStrPrefix" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="clusterTimestamp" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="id" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<p>
+ {@link ReservationId} represents the <em>globally unique</em> identifier for
+ a reservation.
+ </p>
+
+ <p>
+ The globally unique nature of the identifier is achieved by using the
+ <em>cluster timestamp</em> i.e. start-time of the {@code ResourceManager}
+ along with a monotonically increasing counter for the reservation.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ReservationId -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ReservationRequest -->
+  <class name="ReservationRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="ReservationRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ReservationRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="numContainers" type="int"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ReservationRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="numContainers" type="int"/>
+      <param name="concurrency" type="int"/>
+      <param name="duration" type="long"/>
+    </method>
+    <method name="getCapability" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link Resource} capability of the request.
+ 
+ @return {@link Resource} capability of the request]]>
+      </doc>
+    </method>
+    <method name="setCapability"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Set the {@link Resource} capability of the request
+ 
+ @param capability {@link Resource} capability of the request]]>
+      </doc>
+    </method>
+    <method name="getNumContainers" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of containers required with the given specifications.
+ 
+ @return number of containers required with the given specifications]]>
+      </doc>
+    </method>
+    <method name="setNumContainers"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numContainers" type="int"/>
+      <doc>
+      <![CDATA[Set the number of containers required with the given specifications
+ 
+ @param numContainers number of containers required with the given
+          specifications]]>
+      </doc>
+    </method>
+    <method name="getConcurrency" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of containers that need to be scheduled concurrently. The
+ default value of 1 would fall back to the current non concurrency
+ constraints on the scheduling behavior.
+ 
+ @return the number of containers to be concurrently scheduled]]>
+      </doc>
+    </method>
+    <method name="setConcurrency"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numContainers" type="int"/>
+      <doc>
+      <![CDATA[Set the number of containers that need to be scheduled concurrently. The
+ default value of 1 would fall back to the current non concurrency
+ constraints on the scheduling behavior.
+ 
+ @param numContainers the number of containers to be concurrently scheduled]]>
+      </doc>
+    </method>
+    <method name="getDuration" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the duration in milliseconds for which the resource is required. A
+ default value of -1, indicates an unspecified lease duration, and fallback
+ to current behavior.
+ 
+ @return the duration in milliseconds for which the resource is required]]>
+      </doc>
+    </method>
+    <method name="setDuration"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="duration" type="long"/>
+      <doc>
+      <![CDATA[Set the duration in milliseconds for which the resource is required.
+ 
+ @param duration the duration in milliseconds for which the resource is
+          required]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.ReservationRequest"/>
+    </method>
+    <doc>
+    <![CDATA[{@link ReservationRequest} represents the request made by an application to
+ the {@code ResourceManager} to reserve {@link Resource}s.
+ <p>
+ It includes:
+ <ul>
+   <li>{@link Resource} required for each request.</li>
+   <li>
+     Number of containers, of above specifications, which are required by the
+     application.
+   </li>
+   <li>Concurrency that indicates the gang size of the request.</li>
+ </ul>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ReservationRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ReservationRequest.ReservationRequestComparator -->
+  <class name="ReservationRequest.ReservationRequestComparator" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Comparator"/>
+    <implements name="java.io.Serializable"/>
+    <constructor name="ReservationRequestComparator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="compare" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="r1" type="org.apache.hadoop.yarn.api.records.ReservationRequest"/>
+      <param name="r2" type="org.apache.hadoop.yarn.api.records.ReservationRequest"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ReservationRequest.ReservationRequestComparator -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter -->
+  <class name="ReservationRequestInterpreter" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Enumeration of various types of dependencies among multiple
+ {@link ReservationRequests} within one {@link ReservationDefinition} (from
+ least constraining to most constraining).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ReservationRequests -->
+  <class name="ReservationRequests" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ReservationRequests"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ReservationRequests"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationResources" type="java.util.List"/>
+      <param name="type" type="org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter"/>
+    </method>
+    <method name="getReservationResources" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of {@link ReservationRequest} representing the resources
+ required by the application
+ 
+ @return the list of {@link ReservationRequest}]]>
+      </doc>
+    </method>
+    <method name="setReservationResources"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reservationResources" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the list of {@link ReservationRequest} representing the resources
+ required by the application
+ 
+ @param reservationResources the list of {@link ReservationRequest}]]>
+      </doc>
+    </method>
+    <method name="getInterpreter" return="org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@link ReservationRequestInterpreter}, representing how the list of
+ resources should be allocated, this captures temporal ordering and other
+ constraints.
+ 
+ @return the {@link ReservationRequestInterpreter} for this reservation]]>
+      </doc>
+    </method>
+    <method name="setInterpreter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="interpreter" type="org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter"/>
+      <doc>
+      <![CDATA[Set the {@link ReservationRequestInterpreter}, representing how the list of
+ resources should be allocated, this captures temporal ordering and other
+ constraints.
+ 
+ @param interpreter the {@link ReservationRequestInterpreter} for this
+          reservation]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@link ReservationRequests} captures the set of resource and constraints the
+ user cares about regarding a reservation.
+ 
+ @see ReservationRequest]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ReservationRequests -->
+  <!-- start class org.apache.hadoop.yarn.api.records.Resource -->
+  <class name="Resource" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="Resource"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="memory" type="int"/>
+      <param name="vCores" type="int"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="memory" type="long"/>
+      <param name="vCores" type="int"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="memory" type="long"/>
+      <param name="vCores" type="int"/>
+      <param name="others" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Create a new {@link Resource} instance with the given CPU and memory
+ values and additional resource values as set in the {@code others}
+ parameter. Note that the CPU and memory settings in the {@code others}
+ parameter will be ignored.
+
+ @param memory the memory value
+ @param vCores the CPU value
+ @param others a map of other resource values indexed by resource name
+ @return a {@link Resource} instance with the given resource values]]>
+      </doc>
+    </method>
+    <method name="getMemory" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This method is DEPRECATED:
+ Use {@link Resource#getMemorySize()} instead
+
+ Get <em>memory</em> of the resource. Note - while memory has
+ never had a unit specified, all YARN configurations have specified memory
+ in MB. The assumption has been that the daemons and applications are always
+ using the same units. With the introduction of the ResourceInformation
+ class we have support for units - so this function will continue to return
+ memory but in the units of MB
+
+ @return <em>memory</em>(in MB) of the resource]]>
+      </doc>
+    </method>
+    <method name="getMemorySize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>memory</em> of the resource. Note - while memory has
+ never had a unit specified, all YARN configurations have specified memory
+ in MB. The assumption has been that the daemons and applications are always
+ using the same units. With the introduction of the ResourceInformation
+ class we have support for units - so this function will continue to return
+ memory but in the units of MB
+
+ @return <em>memory</em> of the resource]]>
+      </doc>
+    </method>
+    <method name="setMemory"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="memory" type="int"/>
+      <doc>
+      <![CDATA[Set <em>memory</em> of the resource. Note - while memory has
+ never had a unit specified, all YARN configurations have specified memory
+ in MB. The assumption has been that the daemons and applications are always
+ using the same units. With the introduction of the ResourceInformation
+ class we have support for units - so this function will continue to set
+ memory but the assumption is that the value passed is in units of MB.
+
+ @param memory <em>memory</em>(in MB) of the resource]]>
+      </doc>
+    </method>
+    <method name="setMemorySize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="memory" type="long"/>
+      <doc>
+      <![CDATA[Set <em>memory</em> of the resource.
+ @param memory <em>memory</em> of the resource]]>
+      </doc>
+    </method>
+    <method name="getVirtualCores" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>number of virtual cpu cores</em> of the resource.
+ 
+ Virtual cores are a unit for expressing CPU parallelism. A node's capacity
+ should be configured with virtual cores equal to its number of physical
+ cores. A container should be requested with the number of cores it can
+ saturate, i.e. the average number of threads it expects to have runnable
+ at a time.
+
+ @return <em>num of virtual cpu cores</em> of the resource]]>
+      </doc>
+    </method>
+    <method name="setVirtualCores"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="vCores" type="int"/>
+      <doc>
+      <![CDATA[Set <em>number of virtual cpu cores</em> of the resource.
+ 
+ Virtual cores are a unit for expressing CPU parallelism. A node's capacity
+ should be configured with virtual cores equal to its number of physical
+ cores. A container should be requested with the number of cores it can
+ saturate, i.e. the average number of threads it expects to have runnable
+ at a time.
+
+ @param vCores <em>number of virtual cpu cores</em> of the resource]]>
+      </doc>
+    </method>
+    <method name="getResourceInformation" return="org.apache.hadoop.yarn.api.records.ResourceInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resource" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get ResourceInformation for a specified resource.
+
+ @param resource name of the resource
+ @return the ResourceInformation object for the resource]]>
+      </doc>
+    </method>
+    <method name="getResourceValue" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resource" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get the value for a specified resource. No information about the units is
+ returned.
+
+ @param resource name of the resource
+ @return the value for the resource]]>
+      </doc>
+    </method>
+    <method name="setResourceInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resource" type="java.lang.String"/>
+      <param name="resourceInformation" type="org.apache.hadoop.yarn.api.records.ResourceInformation"/>
+      <doc>
+      <![CDATA[Set the ResourceInformation object for a particular resource.
+
+ @param resource the resource for which the ResourceInformation is provided
+ @param resourceInformation ResourceInformation object]]>
+      </doc>
+    </method>
+    <method name="setResourceValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resource" type="java.lang.String"/>
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Set the value of a resource in the ResourceInformation object. The unit of
+ the value is assumed to be the one in the ResourceInformation object.
+
+ @param resource the resource for which the value is provided.
+ @param value    the value to set]]>
+      </doc>
+    </method>
+    <method name="throwExceptionWhenArrayOutOfBound"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="index" type="int"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFormattedString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This method is to get memory in terms of KB|MB|GB.
+ @return string containing all resources]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="castToIntSafely" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Convert long to int for a resource value safely. This method assumes
+ resource value is positive.
+
+ @param value long resource value
+ @return int resource value]]>
+      </doc>
+    </method>
+    <method name="newDefaultInformation" return="org.apache.hadoop.yarn.api.records.ResourceInformation"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="unit" type="java.lang.String"/>
+      <param name="value" type="long"/>
+      <doc>
+      <![CDATA[Create ResourceInformation with basic fields.
+ @param name Resource Type Name
+ @param unit Default unit of provided resource type
+ @param value Value associated with given resource
+ @return ResourceInformation object]]>
+      </doc>
+    </method>
+    <field name="resources" type="org.apache.hadoop.yarn.api.records.ResourceInformation[]"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="MEMORY_INDEX" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="VCORES_INDEX" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<p><code>Resource</code> models a set of computer resources in the 
+ cluster.</p>
+ 
+ <p>Currently it models both <em>memory</em> and <em>CPU</em>.</p>
+ 
+ <p>The unit for memory is megabytes. CPU is modeled with virtual cores
+ (vcores), a unit for expressing parallelism. A node's capacity should
+ be configured with virtual cores equal to its number of physical cores. A
+ container should be requested with the number of cores it can saturate, i.e.
+ the average number of threads it expects to have runnable at a time.</p>
+ 
+ <p>Virtual cores take integer values and thus currently CPU-scheduling is
+ very coarse.  A complementary axis for CPU requests that represents
+ processing power will likely be added in the future to enable finer-grained
+ resource configuration.</p>
+
+ <p>Typically, applications request <code>Resource</code> of suitable
+ capability to run their component tasks.</p>
+ 
+ @see ResourceRequest
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.Resource -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ResourceAllocationRequest -->
+  <class name="ResourceAllocationRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ResourceAllocationRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ResourceAllocationRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="startTime" type="long"/>
+      <param name="endTime" type="long"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[@param startTime The start time that the capability is reserved for.
+ @param endTime The end time that the capability is reserved for.
+ @param capability {@link Resource} representing the capability of the
+                                   resource allocation.
+ @return {@link ResourceAllocationRequest} which represents the capability of
+ the resource allocation for a time interval.]]>
+      </doc>
+    </method>
+    <method name="getStartTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the start time that the resource is allocated.
+
+ @return the start time that the resource is allocated.]]>
+      </doc>
+    </method>
+    <method name="getEndTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the end time that the resource is allocated.
+
+ @return the end time that the resource is allocated.]]>
+      </doc>
+    </method>
+    <method name="getCapability" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the allocated resource.
+
+ @return the allocated resource.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code ResourceAllocationRequest} represents an allocation
+ made for a reservation for the current state of the plan. This can be
+ changed for reasons such as re-planning, but will always be subject to the
+ constraints of the user contract as described by
+ {@link ReservationDefinition}
+ {@link Resource}
+
+ <p>
+ It includes:
+ <ul>
+   <li>StartTime of the allocation.</li>
+   <li>EndTime of the allocation.</li>
+   <li>{@link Resource} reserved for the allocation.</li>
+ </ul>
+
+ @see Resource]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ResourceAllocationRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest -->
+  <class name="ResourceBlacklistRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ResourceBlacklistRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="additions" type="java.util.List"/>
+      <param name="removals" type="java.util.List"/>
+    </method>
+    <method name="getBlacklistAdditions" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of resource-names which should be added to the 
+ application blacklist.
+ 
+ @return list of resource-names which should be added to the 
+         application blacklist]]>
+      </doc>
+    </method>
+    <method name="setBlacklistAdditions"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceNames" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set list of resource-names which should be added to the application blacklist.
+ 
+ @param resourceNames list of resource-names which should be added to the 
+                  application blacklist]]>
+      </doc>
+    </method>
+    <method name="getBlacklistRemovals" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the list of resource-names which should be removed from the 
+ application blacklist.
+ 
+ @return list of resource-names which should be removed from the 
+         application blacklist]]>
+      </doc>
+    </method>
+    <method name="setBlacklistRemovals"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceNames" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set list of resource-names which should be removed from the 
+ application blacklist.
+ 
+ @param resourceNames list of resource-names which should be removed from the 
+                  application blacklist]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@link ResourceBlacklistRequest} encapsulates the list of resource-names 
+ which should be added or removed from the <em>blacklist</em> of resources 
+ for the application.
+ 
+ @see ResourceRequest
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ResourceOption -->
+  <class name="ResourceOption" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ResourceOption"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ResourceOption"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resource" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="overCommitTimeout" type="int"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Negative value means no timeout.]]>
+      </doc>
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ResourceOption -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ResourceRequest -->
+  <class name="ResourceRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="ResourceRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ResourceRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="hostName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="numContainers" type="int"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ResourceRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="hostName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="numContainers" type="int"/>
+      <param name="relaxLocality" type="boolean"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ResourceRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="hostName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="numContainers" type="int"/>
+      <param name="relaxLocality" type="boolean"/>
+      <param name="labelExpression" type="java.lang.String"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ResourceRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="hostName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="numContainers" type="int"/>
+      <param name="relaxLocality" type="boolean"/>
+      <param name="labelExpression" type="java.lang.String"/>
+      <param name="executionTypeRequest" type="org.apache.hadoop.yarn.api.records.ExecutionTypeRequest"/>
+    </method>
+    <method name="clone" return="org.apache.hadoop.yarn.api.records.ResourceRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="rr" type="org.apache.hadoop.yarn.api.records.ResourceRequest"/>
+      <doc>
+      <![CDATA[Clone a ResourceRequest object (shallow copy). Please keep it loaded with
+ all (new) fields
+
+ @param rr the object to copy from
+ @return the copied object]]>
+      </doc>
+    </method>
+    <method name="newBuilder" return="org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isAnyLocation" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hostName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Check whether the given <em>host/rack</em> string represents an arbitrary
+ host name.
+
+ @param hostName <em>host/rack</em> on which the allocation is desired
+ @return whether the given <em>host/rack</em> string represents an arbitrary
+ host name]]>
+      </doc>
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>Priority</code> of the request.
+ @return <code>Priority</code> of the request]]>
+      </doc>
+    </method>
+    <method name="setPriority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <doc>
+      <![CDATA[Set the <code>Priority</code> of the request
+ @param priority <code>Priority</code> of the request]]>
+      </doc>
+    </method>
+    <method name="getResourceName" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the resource (e.g. <em>host/rack</em>) on which the allocation 
+ is desired.
+ 
+ A special value of <em>*</em> signifies that <em>any</em> resource 
+ (host/rack) is acceptable.
+ 
+ @return resource (e.g. <em>host/rack</em>) on which the allocation 
+                  is desired]]>
+      </doc>
+    </method>
+    <method name="setResourceName"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the resource name (e.g. <em>host/rack</em>) on which the allocation 
+ is desired.
+ 
+ A special value of <em>*</em> signifies that <em>any</em> resource name
+ (e.g. host/rack) is acceptable. 
+ 
+ @param resourceName (e.g. <em>host/rack</em>) on which the 
+                     allocation is desired]]>
+      </doc>
+    </method>
+    <method name="getNumContainers" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of containers required with the given specifications.
+ @return number of containers required with the given specifications]]>
+      </doc>
+    </method>
+    <method name="setNumContainers"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numContainers" type="int"/>
+      <doc>
+      <![CDATA[Set the number of containers required with the given specifications
+ @param numContainers number of containers required with the given 
+                      specifications]]>
+      </doc>
+    </method>
+    <method name="getRelaxLocality" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get whether locality relaxation is enabled with this
+ <code>ResourceRequest</code>. Defaults to true.
+ 
+ @return whether locality relaxation is enabled with this
+ <code>ResourceRequest</code>.]]>
+      </doc>
+    </method>
+    <method name="setExecutionTypeRequest"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="execSpec" type="org.apache.hadoop.yarn.api.records.ExecutionTypeRequest"/>
+      <doc>
+      <![CDATA[Set the <code>ExecutionTypeRequest</code> of the requested container.
+
+ @param execSpec
+          ExecutionTypeRequest of the requested container]]>
+      </doc>
+    </method>
+    <method name="getExecutionTypeRequest" return="org.apache.hadoop.yarn.api.records.ExecutionTypeRequest"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ExecutionTypeRequest</code> of the requested container.
+
+ @return the <code>ExecutionTypeRequest</code> of the requested container.]]>
+      </doc>
+    </method>
+    <method name="setRelaxLocality"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="relaxLocality" type="boolean"/>
+      <doc>
+      <![CDATA[<p>For a request at a network hierarchy level, set whether locality can be relaxed
+ to that level and beyond.</p>
+ 
+ <p>If the flag is off on a rack-level <code>ResourceRequest</code>,
+ containers at that request's priority will not be assigned to nodes on that
+ request's rack unless requests specifically for those nodes have also been
+ submitted.</p>
+ 
+ <p>If the flag is off on an {@link ResourceRequest#ANY}-level
+ <code>ResourceRequest</code>, containers at that request's priority will
+ only be assigned on racks for which specific requests have also been
+ submitted.</p>
+ 
+ <p>For example, to request a container strictly on a specific node, the
+ corresponding rack-level and any-level requests should have locality
+ relaxation set to false.  Similarly, to request a container strictly on a
+ specific rack, the corresponding any-level request should have locality
+ relaxation set to false.</p>
+ 
+ @param relaxLocality whether locality relaxation is enabled with this
+ <code>ResourceRequest</code>.]]>
+      </doc>
+    </method>
+    <method name="getNodeLabelExpression" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get node-label-expression for this Resource Request. If this is set, all
+ containers allocated to satisfy this resource-request will be only on those
+ nodes that satisfy this node-label-expression.
+  
+ Please note that node label expression now can only take effect when the
+ resource request has resourceName = ANY
+ 
+ @return node-label-expression]]>
+      </doc>
+    </method>
+    <method name="setNodeLabelExpression"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodelabelExpression" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set node label expression of this resource request. Now only support
+ specifying a single node label. In the future we will support more complex
+ node label expression specification like {@code AND(&&), OR(||)}, etc.
+ 
+ Also please note that node label expression now can only take effect when
+ the resource request has resourceName = ANY
+ 
+ @param nodelabelExpression
+          node-label-expression of this ResourceRequest]]>
+      </doc>
+    </method>
+    <method name="getAllocationRequestId" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the optional <em>ID</em> corresponding to this allocation request. This
+ ID is an identifier for different {@code ResourceRequest}s from the <b>same
+ application</b>. The allocated {@code Container}(s) received as part of the
+ {@code AllocateResponse} response will have the ID corresponding to the
+ original {@code ResourceRequest} for which the RM made the allocation.
+ <p>
+ The scheduler may return multiple {@code AllocateResponse}s corresponding
+ to the same ID as and when scheduler allocates {@code Container}(s).
+ <b>Applications</b> can continue to completely ignore the returned ID in
+ the response and use the allocation for any of their outstanding requests.
+ <p>
+ If one wishes to replace an entire {@code ResourceRequest} corresponding to
+ a specific ID, they can simply cancel the corresponding {@code
+ ResourceRequest} and submit a new one afresh.
+
+ @return the <em>ID</em> corresponding to this allocation request.]]>
+      </doc>
+    </method>
+    <method name="setAllocationRequestId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allocationRequestID" type="long"/>
+      <doc>
+      <![CDATA[Set the optional <em>ID</em> corresponding to this allocation request. This
+ ID is an identifier for different {@code ResourceRequest}s from the <b>same
+ application</b>. The allocated {@code Container}(s) received as part of the
+ {@code AllocateResponse} response will have the ID corresponding to the
+ original {@code ResourceRequest} for which the RM made the allocation.
+ <p>
+ The scheduler may return multiple {@code AllocateResponse}s corresponding
+ to the same ID as and when scheduler allocates {@code Container}(s).
+ <b>Applications</b> can continue to completely ignore the returned ID in
+ the response and use the allocation for any of their outstanding requests.
+ <p>
+ If one wishes to replace an entire {@code ResourceRequest} corresponding to
+ a specific ID, they can simply cancel the corresponding {@code
+ ResourceRequest} and submit a new one afresh.
+ <p>
+ If the ID is not set, scheduler will continue to work as previously and all
+ allocated {@code Container}(s) will have the default ID, -1.
+
+ @param allocationRequestID the <em>ID</em> corresponding to this allocation
+                            request.]]>
+      </doc>
+    </method>
+    <method name="setCapability"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Set the <code>Resource</code> capability of the request.
+ @param capability <code>Resource</code> capability of the request]]>
+      </doc>
+    </method>
+    <method name="getCapability" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>Resource</code> capability of the request.
+ @return <code>Resource</code> capability of the request]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.ResourceRequest"/>
+    </method>
+    <field name="ANY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The constant string representing no locality.
+ It should be used by all references that want to pass an arbitrary host
+ name in.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[{@code ResourceRequest} represents the request made
+ by an application to the {@code ResourceManager}
+ to obtain various {@code Container} allocations.
+ <p>
+ It includes:
+ <ul>
+   <li>{@link Priority} of the request.</li>
+   <li>
+     The <em>name</em> of the host or rack on which the allocation is
+     desired. A special value of <em>*</em> signifies that
+     <em>any</em> host/rack is acceptable to the application.
+   </li>
+   <li>{@link Resource} required for each request.</li>
+   <li>
+     Number of containers, of above specifications, which are required
+     by the application.
+   </li>
+   <li>
+     A boolean <em>relaxLocality</em> flag, defaulting to {@code true},
+     which tells the {@code ResourceManager} if the application wants
+     locality to be loose (i.e. allows fall-through to rack or <em>any</em>)
+     or strict (i.e. specify hard constraint on resource allocation).
+   </li>
+ </ul>
+ 
+ @see Resource
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ResourceRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestBuilder -->
+  <class name="ResourceRequest.ResourceRequestBuilder" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="priority" return="org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <doc>
+      <![CDATA[Set the <code>priority</code> of the request.
+ @see ResourceRequest#setPriority(Priority)
+ @param priority <code>priority</code> of the request
+ @return {@link ResourceRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="resourceName" return="org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <code>resourceName</code> of the request.
+ @see ResourceRequest#setResourceName(String)
+ @param resourceName <code>resourceName</code> of the request
+ @return {@link ResourceRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="capability" return="org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Set the <code>capability</code> of the request.
+ @see ResourceRequest#setCapability(Resource)
+ @param capability <code>capability</code> of the request
+ @return {@link ResourceRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="numContainers" return="org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numContainers" type="int"/>
+      <doc>
+      <![CDATA[Set the <code>numContainers</code> of the request.
+ @see ResourceRequest#setNumContainers(int)
+ @param numContainers <code>numContainers</code> of the request
+ @return {@link ResourceRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="relaxLocality" return="org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="relaxLocality" type="boolean"/>
+      <doc>
+      <![CDATA[Set the <code>relaxLocality</code> of the request.
+ @see ResourceRequest#setRelaxLocality(boolean)
+ @param relaxLocality <code>relaxLocality</code> of the request
+ @return {@link ResourceRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="nodeLabelExpression" return="org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeLabelExpression" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the <code>nodeLabelExpression</code> of the request.
+ @see ResourceRequest#setNodeLabelExpression(String)
+ @param nodeLabelExpression
+          <code>nodeLabelExpression</code> of the request
+ @return {@link ResourceRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="executionTypeRequest" return="org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="executionTypeRequest" type="org.apache.hadoop.yarn.api.records.ExecutionTypeRequest"/>
+      <doc>
+      <![CDATA[Set the <code>executionTypeRequest</code> of the request.
+ @see ResourceRequest#setExecutionTypeRequest(
+ ExecutionTypeRequest)
+ @param executionTypeRequest
+          <code>executionTypeRequest</code> of the request
+ @return {@link ResourceRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="executionType" return="org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="executionType" type="org.apache.hadoop.yarn.api.records.ExecutionType"/>
+      <doc>
+      <![CDATA[Set the <code>executionTypeRequest</code> of the request with 'ensure
+ execution type' flag set to true.
+ @see ResourceRequest#setExecutionTypeRequest(
+ ExecutionTypeRequest)
+ @param executionType <code>executionType</code> of the request.
+ @return {@link ResourceRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="allocationRequestId" return="org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allocationRequestId" type="long"/>
+      <doc>
+      <![CDATA[Set the <code>allocationRequestId</code> of the request.
+ @see ResourceRequest#setAllocationRequestId(long)
+ @param allocationRequestId
+          <code>allocationRequestId</code> of the request
+ @return {@link ResourceRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="build" return="org.apache.hadoop.yarn.api.records.ResourceRequest"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return generated {@link ResourceRequest} object.
+ @return {@link ResourceRequest}]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Class to construct instances of {@link ResourceRequest} with specific
+ options.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestBuilder -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestComparator -->
+  <class name="ResourceRequest.ResourceRequestComparator" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Comparator"/>
+    <implements name="java.io.Serializable"/>
+    <constructor name="ResourceRequestComparator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="compare" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="r1" type="org.apache.hadoop.yarn.api.records.ResourceRequest"/>
+      <param name="r2" type="org.apache.hadoop.yarn.api.records.ResourceRequest"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ResourceRequest.ResourceRequestComparator -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ResourceSizing -->
+  <class name="ResourceSizing" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ResourceSizing"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ResourceSizing"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ResourceSizing"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numAllocations" type="int"/>
+      <param name="resources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="getNumAllocations" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setNumAllocations"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="numAllocations" type="int"/>
+    </method>
+    <method name="getResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setResources"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resources" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <doc>
+    <![CDATA[{@code ResourceSizing} contains information for the size of a
+ {@link SchedulingRequest}, such as the number of requested allocations and
+ the resources for each allocation.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ResourceSizing -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ResourceUtilization -->
+  <class name="ResourceUtilization" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="ResourceUtilization"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ResourceUtilization"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pmem" type="int"/>
+      <param name="vmem" type="int"/>
+      <param name="cpu" type="float"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ResourceUtilization"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pmem" type="int"/>
+      <param name="vmem" type="int"/>
+      <param name="cpu" type="float"/>
+      <param name="customResources" type="java.util.Map"/>
+    </method>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.ResourceUtilization"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceUtil" type="org.apache.hadoop.yarn.api.records.ResourceUtilization"/>
+    </method>
+    <method name="getVirtualMemory" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get used <em>virtual memory</em>.
+
+ @return <em>virtual memory</em> in MB]]>
+      </doc>
+    </method>
+    <method name="setVirtualMemory"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="vmem" type="int"/>
+      <doc>
+      <![CDATA[Set used <em>virtual memory</em>.
+
+ @param vmem <em>virtual memory</em> in MB]]>
+      </doc>
+    </method>
+    <method name="getPhysicalMemory" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>physical memory</em>.
+
+ @return <em>physical memory</em> in MB]]>
+      </doc>
+    </method>
+    <method name="setPhysicalMemory"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pmem" type="int"/>
+      <doc>
+      <![CDATA[Set <em>physical memory</em>.
+
+ @param pmem <em>physical memory</em> in MB]]>
+      </doc>
+    </method>
+    <method name="getCPU" return="float"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get <em>CPU</em> utilization (The amount of vcores used).
+
+ @return <em>CPU utilization</em>]]>
+      </doc>
+    </method>
+    <method name="setCPU"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cpu" type="float"/>
+      <doc>
+      <![CDATA[Set <em>CPU</em> utilization (The amount of vcores used).
+
+ @param cpu <em>CPU utilization</em>]]>
+      </doc>
+    </method>
+    <method name="getCustomResource" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Get <em>custom resource</em> utilization
+ (The amount of custom resource used).
+
+ @param resourceName <em>resourceName of custom resource</em>
+ @return <em>resourceName utilization</em>]]>
+      </doc>
+    </method>
+    <method name="getCustomResources" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setCustomResources"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="customResources" type="java.util.Map"/>
+    </method>
+    <method name="setCustomResource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="utilization" type="float"/>
+      <doc>
+      <![CDATA[Set <em>custom resource</em> utilization
+ (The amount of custom resource used).
+ @param resourceName <em>resourceName</em>
+ @param utilization <em>utilization of custom resource</em>]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="addTo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pmem" type="int"/>
+      <param name="vmem" type="int"/>
+      <param name="cpu" type="float"/>
+      <doc>
+      <![CDATA[Add utilization to the current one.
+ @param pmem Physical memory used to add.
+ @param vmem Virtual memory used to add.
+ @param cpu CPU utilization to add.]]>
+      </doc>
+    </method>
+    <method name="addTo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pmem" type="int"/>
+      <param name="vmem" type="int"/>
+      <param name="cpu" type="float"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="utilization" type="float"/>
+      <doc>
+      <![CDATA[Add utilization to the current one.
+ @param pmem Physical memory used to add.
+ @param vmem Virtual memory used to add.
+ @param cpu CPU utilization to add.
+ @param resourceName of custom resource to add.
+ @param utilization of custom resource to add.]]>
+      </doc>
+    </method>
+    <method name="subtractFrom"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pmem" type="int"/>
+      <param name="vmem" type="int"/>
+      <param name="cpu" type="float"/>
+      <doc>
+      <![CDATA[Subtract utilization from the current one.
+ @param pmem Physical memory to be subtracted.
+ @param vmem Virtual memory to be subtracted.
+ @param cpu CPU utilization to be subtracted.]]>
+      </doc>
+    </method>
+    <method name="subtractFrom"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pmem" type="int"/>
+      <param name="vmem" type="int"/>
+      <param name="cpu" type="float"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="utilization" type="float"/>
+      <doc>
+      <![CDATA[Subtract utilization from the current one.
+ @param pmem Physical memory to be subtracted.
+ @param vmem Virtual memory to be subtracted.
+ @param cpu CPU utilization to be subtracted.
+ @param resourceName of custom resource to be subtracted.
+ @param utilization of custom resource to be subtracted.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ <code>ResourceUtilization</code> models the utilization of a set of computer
+ resources in the cluster.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ResourceUtilization -->
+  <!-- start class org.apache.hadoop.yarn.api.records.SchedulingRequest -->
+  <class name="SchedulingRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SchedulingRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.SchedulingRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allocationRequestId" type="long"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="executionType" type="org.apache.hadoop.yarn.api.records.ExecutionTypeRequest"/>
+      <param name="allocationTags" type="java.util.Set"/>
+      <param name="resourceSizing" type="org.apache.hadoop.yarn.api.records.ResourceSizing"/>
+      <param name="placementConstraintExpression" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint"/>
+    </method>
+    <method name="newBuilder" return="org.apache.hadoop.yarn.api.records.SchedulingRequest.SchedulingRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocationRequestId" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setAllocationRequestId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allocationRequestId" type="long"/>
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setPriority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+    </method>
+    <method name="getExecutionType" return="org.apache.hadoop.yarn.api.records.ExecutionTypeRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setExecutionType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="executionType" type="org.apache.hadoop.yarn.api.records.ExecutionTypeRequest"/>
+    </method>
+    <method name="getAllocationTags" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setAllocationTags"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allocationTags" type="java.util.Set"/>
+    </method>
+    <method name="getResourceSizing" return="org.apache.hadoop.yarn.api.records.ResourceSizing"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setResourceSizing"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceSizing" type="org.apache.hadoop.yarn.api.records.ResourceSizing"/>
+    </method>
+    <method name="getPlacementConstraint" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setPlacementConstraint"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="placementConstraint" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint"/>
+    </method>
+    <doc>
+    <![CDATA[{@code SchedulingRequest} represents a request made by an application to the
+ {@code ResourceManager} to obtain an allocation. It is similar to the
+ {@link ResourceRequest}. However, it is more complete than the latter, as it
+ allows applications to specify allocation tags (e.g., to express that an
+ allocation belongs to {@code Spark} or is an {@code HBase-master}), as well
+ as involved {@link PlacementConstraint}s (e.g., anti-affinity between Spark
+ and HBase allocations).
+
+ The size specification of the allocation is in {@code ResourceSizing}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.SchedulingRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.records.SchedulingRequest.SchedulingRequestBuilder -->
+  <class name="SchedulingRequest.SchedulingRequestBuilder" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="allocationRequestId" return="org.apache.hadoop.yarn.api.records.SchedulingRequest.SchedulingRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allocationRequestId" type="long"/>
+      <doc>
+      <![CDATA[Set the <code>allocationRequestId</code> of the request.
+
+ @see SchedulingRequest#setAllocationRequestId(long)
+ @param allocationRequestId <code>allocationRequestId</code> of the
+          request
+ @return {@link SchedulingRequest.SchedulingRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="priority" return="org.apache.hadoop.yarn.api.records.SchedulingRequest.SchedulingRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <doc>
+      <![CDATA[Set the <code>priority</code> of the request.
+
+ @param priority <code>priority</code> of the request
+ @return {@link SchedulingRequest.SchedulingRequestBuilder}
+ @see SchedulingRequest#setPriority(Priority)]]>
+      </doc>
+    </method>
+    <method name="executionType" return="org.apache.hadoop.yarn.api.records.SchedulingRequest.SchedulingRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="executionType" type="org.apache.hadoop.yarn.api.records.ExecutionTypeRequest"/>
+      <doc>
+      <![CDATA[Set the <code>executionType</code> of the request.
+
+ @see SchedulingRequest#setExecutionType(ExecutionTypeRequest)
+ @param executionType <code>executionType</code> of the request
+ @return {@link SchedulingRequest.SchedulingRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="allocationTags" return="org.apache.hadoop.yarn.api.records.SchedulingRequest.SchedulingRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allocationTags" type="java.util.Set"/>
+      <doc>
+      <![CDATA[Set the <code>allocationTags</code> of the request.
+
+ @see SchedulingRequest#setAllocationTags(Set)
+ @param allocationTags <code>allocationsTags</code> of the request
+ @return {@link SchedulingRequest.SchedulingRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="resourceSizing" return="org.apache.hadoop.yarn.api.records.SchedulingRequest.SchedulingRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="resourceSizing" type="org.apache.hadoop.yarn.api.records.ResourceSizing"/>
+      <doc>
+      <![CDATA[Set the <code>resourceSizing</code> of the request.
+
+ @see SchedulingRequest#setResourceSizing(ResourceSizing)
+ @param resourceSizing <code>resourceSizing</code> of the request
+ @return {@link SchedulingRequest.SchedulingRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="placementConstraintExpression" return="org.apache.hadoop.yarn.api.records.SchedulingRequest.SchedulingRequestBuilder"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="placementConstraintExpression" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint"/>
+      <doc>
+      <![CDATA[Set the <code>placementConstraintExpression</code> of the request.
+
+ @see SchedulingRequest#setPlacementConstraint(
+      PlacementConstraint)
+ @param placementConstraintExpression <code>placementConstraints</code> of
+          the request
+ @return {@link SchedulingRequest.SchedulingRequestBuilder}]]>
+      </doc>
+    </method>
+    <method name="build" return="org.apache.hadoop.yarn.api.records.SchedulingRequest"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return generated {@link SchedulingRequest} object.
+
+ @return {@link SchedulingRequest}]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Class to construct instances of {@link SchedulingRequest} with specific
+ options.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.SchedulingRequest.SchedulingRequestBuilder -->
+  <!-- start class org.apache.hadoop.yarn.api.records.ShellContainerCommand -->
+  <class name="ShellContainerCommand" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.ShellContainerCommand[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.ShellContainerCommand"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Enumeration of various shell container commands.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.ShellContainerCommand -->
+  <!-- start class org.apache.hadoop.yarn.api.records.SignalContainerCommand -->
+  <class name="SignalContainerCommand" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.SignalContainerCommand[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.SignalContainerCommand"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Enumeration of various signal container commands.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.SignalContainerCommand -->
+  <!-- start class org.apache.hadoop.yarn.api.records.StrictPreemptionContract -->
+  <class name="StrictPreemptionContract" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StrictPreemptionContract"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getContainers" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the set of {@link PreemptionContainer} specifying containers owned by
+ the <code>ApplicationMaster</code> that may be reclaimed by the
+ <code>ResourceManager</code>.
+ @return the set of {@link PreemptionContainer} to be preempted.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Enumeration of particular allocations to be reclaimed. The platform will
+ reclaim exactly these resources, so the <code>ApplicationMaster</code> (AM)
+ may attempt to checkpoint work or adjust its execution plan to accommodate
+ it. In contrast to {@link PreemptionContract}, the AM has no flexibility in
+ selecting which resources to return to the cluster.
+ @see PreemptionMessage]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.StrictPreemptionContract -->
+  <!-- start class org.apache.hadoop.yarn.api.records.Token -->
+  <class name="Token" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Token"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getIdentifier" return="java.nio.ByteBuffer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the token identifier.
+ @return token identifier]]>
+      </doc>
+    </method>
+    <method name="getPassword" return="java.nio.ByteBuffer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the token password
+ @return token password]]>
+      </doc>
+    </method>
+    <method name="getKind" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the token kind.
+ @return token kind]]>
+      </doc>
+    </method>
+    <method name="getService" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the service to which the token is allocated.
+ @return service to which the token is allocated]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p><code>Token</code> is the security entity used by the framework
+ to verify authenticity of any resource.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.Token -->
+  <!-- start class org.apache.hadoop.yarn.api.records.UpdateContainerError -->
+  <class name="UpdateContainerError" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UpdateContainerError"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.UpdateContainerError"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reason" type="java.lang.String"/>
+      <param name="updateContainerRequest" type="org.apache.hadoop.yarn.api.records.UpdateContainerRequest"/>
+    </method>
+    <method name="getReason" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get reason why the update request was not satisfiable.
+ @return Reason]]>
+      </doc>
+    </method>
+    <method name="setReason"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="reason" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set reason why the update request was not satisfiable.
+ @param reason Reason]]>
+      </doc>
+    </method>
+    <method name="getCurrentContainerVersion" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get current container version.
+ @return Current container Version.]]>
+      </doc>
+    </method>
+    <method name="setCurrentContainerVersion"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="currentVersion" type="int"/>
+      <doc>
+      <![CDATA[Set current container version.
+ @param currentVersion Current container version.]]>
+      </doc>
+    </method>
+    <method name="getUpdateContainerRequest" return="org.apache.hadoop.yarn.api.records.UpdateContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the {@code UpdateContainerRequest} that was not satisfiable.
+ @return UpdateContainerRequest]]>
+      </doc>
+    </method>
+    <method name="setUpdateContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="updateContainerRequest" type="org.apache.hadoop.yarn.api.records.UpdateContainerRequest"/>
+      <doc>
+      <![CDATA[Set the {@code UpdateContainerRequest} that was not satisfiable.
+ @param updateContainerRequest Update Container Request]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <doc>
+    <![CDATA[{@code UpdateContainerError} is used by the Scheduler to notify the
+ ApplicationMaster of an UpdateContainerRequest it cannot satisfy due to
+ an error in the request. It includes the update request as well as
+ a reason for why the request was not satisfiable.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.UpdateContainerError -->
+  <!-- start class org.apache.hadoop.yarn.api.records.UpdateContainerRequest -->
+  <class name="UpdateContainerRequest" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UpdateContainerRequest"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.UpdateContainerRequest"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="version" type="int"/>
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="updateType" type="org.apache.hadoop.yarn.api.records.ContainerUpdateType"/>
+      <param name="targetCapability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="targetExecutionType" type="org.apache.hadoop.yarn.api.records.ExecutionType"/>
+    </method>
+    <method name="getContainerVersion" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current version of the container.
+ @return current version of the container]]>
+      </doc>
+    </method>
+    <method name="setContainerVersion"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerVersion" type="int"/>
+      <doc>
+      <![CDATA[Set the current version of the container.
+ @param containerVersion of the container]]>
+      </doc>
+    </method>
+    <method name="getContainerUpdateType" return="org.apache.hadoop.yarn.api.records.ContainerUpdateType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerUpdateType</code> of the container.
+ @return <code>ContainerUpdateType</code> of the container.]]>
+      </doc>
+    </method>
+    <method name="setContainerUpdateType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="updateType" type="org.apache.hadoop.yarn.api.records.ContainerUpdateType"/>
+      <doc>
+      <![CDATA[Set the <code>ContainerUpdateType</code> of the container.
+ @param updateType of the Container]]>
+      </doc>
+    </method>
+    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerId</code> of the container.
+ @return <code>ContainerId</code> of the container]]>
+      </doc>
+    </method>
+    <method name="setContainerId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[Set the <code>ContainerId</code> of the container.
+ @param containerId <code>ContainerId</code> of the container]]>
+      </doc>
+    </method>
+    <method name="getExecutionType" return="org.apache.hadoop.yarn.api.records.ExecutionType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the target <code>ExecutionType</code> of the container.
+ @return <code>ExecutionType</code> of the container]]>
+      </doc>
+    </method>
+    <method name="setExecutionType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="executionType" type="org.apache.hadoop.yarn.api.records.ExecutionType"/>
+      <doc>
+      <![CDATA[Set the target <code>ExecutionType</code> of the container.
+ @param executionType <code>ExecutionType</code> of the container]]>
+      </doc>
+    </method>
+    <method name="setCapability"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Set the <code>Resource</code> capability of the request.
+ @param capability <code>Resource</code> capability of the request]]>
+      </doc>
+    </method>
+    <method name="getCapability" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>Resource</code> capability of the request.
+ @return <code>Resource</code> capability of the request]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <doc>
+    <![CDATA[{@code UpdateContainerRequest} represents the request made by an
+ application to the {@code ResourceManager} to update an attribute of a
+ {@code Container} such as its Resource allocation or {@code ExecutionType}.
+ <p>
+ It includes:
+ <ul>
+   <li>version for the container.</li>
+   <li>{@link ContainerId} for the container.</li>
+   <li>
+     {@link Resource} capability of the container after the update request
+     is completed.
+   </li>
+   <li>
+     {@link ExecutionType} of the container after the update request is
+     completed.
+   </li>
+ </ul>
+
+ Update rules:
+ <ul>
+   <li>
+     Currently only ONE aspect of the container can be updated per request
+     (user can either update Capability OR ExecutionType in one request,
+     not both).
+   </li>
+   <li>
+     There must be only 1 update request per container in an allocate call.
+   </li>
+   <li>
+     If a new update request is sent for a container (in a subsequent allocate
+     call) before the first one is satisfied by the Scheduler, it will
+     overwrite the previous request.
+   </li>
+ </ul>
+ @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.UpdateContainerRequest -->
+  <!-- start class org.apache.hadoop.yarn.api.records.UpdatedContainer -->
+  <class name="UpdatedContainer" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UpdatedContainer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.UpdatedContainer"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="updateType" type="org.apache.hadoop.yarn.api.records.ContainerUpdateType"/>
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <doc>
+      <![CDATA[Static Factory method.
+
+ @param updateType ContainerUpdateType
+ @param container Container
+ @return UpdatedContainer]]>
+      </doc>
+    </method>
+    <method name="getUpdateType" return="org.apache.hadoop.yarn.api.records.ContainerUpdateType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ContainerUpdateType</code>.
+ @return ContainerUpdateType]]>
+      </doc>
+    </method>
+    <method name="setUpdateType"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="updateType" type="org.apache.hadoop.yarn.api.records.ContainerUpdateType"/>
+      <doc>
+      <![CDATA[Set the <code>ContainerUpdateType</code>.
+ @param updateType ContainerUpdateType]]>
+      </doc>
+    </method>
+    <method name="getContainer" return="org.apache.hadoop.yarn.api.records.Container"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>Container</code>.
+ @return Container]]>
+      </doc>
+    </method>
+    <method name="setContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <doc>
+      <![CDATA[Set the <code>Container</code>.
+ @param container Container]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <doc>
+    <![CDATA[An object that encapsulates an updated container and the
+ type of Update.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.UpdatedContainer -->
+  <!-- start class org.apache.hadoop.yarn.api.records.URL -->
+  <class name="URL" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="URL"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.URL"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scheme" type="java.lang.String"/>
+      <param name="host" type="java.lang.String"/>
+      <param name="port" type="int"/>
+      <param name="file" type="java.lang.String"/>
+    </method>
+    <method name="getScheme" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the scheme of the URL.
+ @return scheme of the URL]]>
+      </doc>
+    </method>
+    <method name="setScheme"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scheme" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the scheme of the URL
+ @param scheme scheme of the URL]]>
+      </doc>
+    </method>
+    <method name="getUserInfo" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user info of the URL.
+ @return user info of the URL]]>
+      </doc>
+    </method>
+    <method name="setUserInfo"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="userInfo" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the user info of the URL.
+ @param userInfo user info of the URL]]>
+      </doc>
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the host of the URL.
+ @return host of the URL]]>
+      </doc>
+    </method>
+    <method name="setHost"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="host" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the host of the URL.
+ @param host host of the URL]]>
+      </doc>
+    </method>
+    <method name="getPort" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the port of the URL.
+ @return port of the URL]]>
+      </doc>
+    </method>
+    <method name="setPort"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="port" type="int"/>
+      <doc>
+      <![CDATA[Set the port of the URL
+ @param port port of the URL]]>
+      </doc>
+    </method>
+    <method name="getFile" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the file of the URL.
+ @return file of the URL]]>
+      </doc>
+    </method>
+    <method name="setFile"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="file" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the file of the URL.
+ @param file file of the URL]]>
+      </doc>
+    </method>
+    <method name="toPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="URISyntaxException" type="java.net.URISyntaxException"/>
+    </method>
+    <method name="fromURI" return="org.apache.hadoop.yarn.api.records.URL"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+    </method>
+    <method name="fromPath" return="org.apache.hadoop.yarn.api.records.URL"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <doc>
+    <![CDATA[<p><code>URL</code> represents a serializable {@link java.net.URL}.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.URL -->
+  <!-- start class org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState -->
+  <class name="YarnApplicationAttemptState" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Enumeration of various states of a <code>RMAppAttempt</code>.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState -->
+  <!-- start class org.apache.hadoop.yarn.api.records.YarnApplicationState -->
+  <class name="YarnApplicationState" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.YarnApplicationState[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.YarnApplicationState"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[Enumeration of various states of an <code>ApplicationMaster</code>.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.YarnApplicationState -->
+  <!-- start class org.apache.hadoop.yarn.api.records.YarnClusterMetrics -->
+  <class name="YarnClusterMetrics" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnClusterMetrics"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNumNodeManagers" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of <code>NodeManager</code>s in the cluster.
+ @return number of <code>NodeManager</code>s in the cluster]]>
+      </doc>
+    </method>
+    <method name="getNumDecommissionedNodeManagers" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of <code>DecommissionedNodeManager</code>s in the cluster.
+ 
+ @return number of <code>DecommissionedNodeManager</code>s in the cluster]]>
+      </doc>
+    </method>
+    <method name="getNumActiveNodeManagers" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of <code>ActiveNodeManager</code>s in the cluster.
+ 
+ @return number of <code>ActiveNodeManager</code>s in the cluster]]>
+      </doc>
+    </method>
+    <method name="getNumLostNodeManagers" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of <code>LostNodeManager</code>s in the cluster.
+ 
+ @return number of <code>LostNodeManager</code>s in the cluster]]>
+      </doc>
+    </method>
+    <method name="getNumUnhealthyNodeManagers" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of <code>UnhealthyNodeManager</code>s in the cluster.
+ 
+ @return number of <code>UnhealthyNodeManager</code>s in the cluster]]>
+      </doc>
+    </method>
+    <method name="getNumRebootedNodeManagers" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the number of <code>RebootedNodeManager</code>s in the cluster.
+ 
+ @return number of <code>RebootedNodeManager</code>s in the cluster]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p><code>YarnClusterMetrics</code> represents cluster metrics.</p>
+ 
+ <p>Currently only number of <code>NodeManager</code>s is provided.</p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.YarnClusterMetrics -->
+</package>
+<package name="org.apache.hadoop.yarn.api.records.impl">
+</package>
+<package name="org.apache.hadoop.yarn.api.records.timeline">
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelineAbout -->
+  <class name="TimelineAbout" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineAbout"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="TimelineAbout" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAbout" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setAbout"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="about" type="java.lang.String"/>
+    </method>
+    <method name="getTimelineServiceVersion" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setTimelineServiceVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineServiceVersion" type="java.lang.String"/>
+    </method>
+    <method name="getTimelineServiceBuildVersion" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setTimelineServiceBuildVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineServiceBuildVersion" type="java.lang.String"/>
+    </method>
+    <method name="getTimelineServiceVersionBuiltOn" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setTimelineServiceVersionBuiltOn"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineServiceVersionBuiltOn" type="java.lang.String"/>
+    </method>
+    <method name="getHadoopVersion" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setHadoopVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hadoopVersion" type="java.lang.String"/>
+    </method>
+    <method name="getHadoopBuildVersion" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setHadoopBuildVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hadoopBuildVersion" type="java.lang.String"/>
+    </method>
+    <method name="getHadoopVersionBuiltOn" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setHadoopVersionBuiltOn"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hadoopVersionBuiltOn" type="java.lang.String"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelineAbout -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelineDelegationTokenResponse -->
+  <class name="TimelineDelegationTokenResponse" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineDelegationTokenResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getType" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="java.lang.String"/>
+    </method>
+    <method name="getContent" return="java.lang.Object"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="content" type="java.lang.Object"/>
+    </method>
+    <doc>
+    <![CDATA[The response of delegation token related request]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelineDelegationTokenResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelineDomain -->
+  <class name="TimelineDomain" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineDomain"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the domain ID
+ 
+ @return the domain ID]]>
+      </doc>
+    </method>
+    <method name="setId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="id" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the domain ID
+ 
+ @param id the domain ID]]>
+      </doc>
+    </method>
+    <method name="getDescription" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the domain description
+ 
+ @return the domain description]]>
+      </doc>
+    </method>
+    <method name="setDescription"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="description" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the domain description
+ 
+ @param description the domain description]]>
+      </doc>
+    </method>
+    <method name="getOwner" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the domain owner
+ 
+ @return the domain owner]]>
+      </doc>
+    </method>
+    <method name="setOwner"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="owner" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the domain owner. The user doesn't need to set it, which will
+ automatically set to the user who puts the domain.
+ 
+ @param owner the domain owner]]>
+      </doc>
+    </method>
+    <method name="getReaders" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reader (and/or reader group) list string
+ 
+ @return the reader (and/or reader group) list string]]>
+      </doc>
+    </method>
+    <method name="setReaders"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="readers" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the reader (and/or reader group) list string
+ 
+ @param readers the reader (and/or reader group) list string]]>
+      </doc>
+    </method>
+    <method name="getWriters" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the writer (and/or writer group) list string
+ 
+ @return the writer (and/or writer group) list string]]>
+      </doc>
+    </method>
+    <method name="setWriters"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="writers" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the writer (and/or writer group) list string
+ 
+ @param writers the writer (and/or writer group) list string]]>
+      </doc>
+    </method>
+    <method name="getCreatedTime" return="java.lang.Long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the created time of the domain
+ 
+ @return the created time of the domain]]>
+      </doc>
+    </method>
+    <method name="setCreatedTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="createdTime" type="java.lang.Long"/>
+      <doc>
+      <![CDATA[Set the created time of the domain
+ 
+ @param createdTime the created time of the domain]]>
+      </doc>
+    </method>
+    <method name="getModifiedTime" return="java.lang.Long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the modified time of the domain
+ 
+ @return the modified time of the domain]]>
+      </doc>
+    </method>
+    <method name="setModifiedTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="modifiedTime" type="java.lang.Long"/>
+      <doc>
+      <![CDATA[Set the modified time of the domain
+ 
+ @param modifiedTime the modified time of the domain]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ This class contains the information about a timeline domain, which is used
+ to a user to host a number of timeline entities, isolating them from others'.
+ The user can also define the reader and writer users/groups for the the
+ domain, which is used to control the access to its entities.
+ </p>
+ 
+ <p>
+ The reader and writer users/groups pattern that the user can supply is the
+ same as what <code>AccessControlList</code> takes.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelineDomain -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelineDomains -->
+  <class name="TimelineDomains" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineDomains"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getDomains" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of domains
+ 
+ @return a list of domains]]>
+      </doc>
+    </method>
+    <method name="addDomain"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="domain" type="org.apache.hadoop.yarn.api.records.timeline.TimelineDomain"/>
+      <doc>
+      <![CDATA[Add a single domain into the existing domain list
+ 
+ @param domain
+          a single domain]]>
+      </doc>
+    </method>
+    <method name="addDomains"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="domains" type="java.util.List"/>
+      <doc>
+      <![CDATA[All a list of domains into the existing domain list
+ 
+ @param domains
+          a list of domains]]>
+      </doc>
+    </method>
+    <method name="setDomains"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="domains" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the domain list to the given list of domains
+ 
+ @param domains
+          a list of domains]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The class that hosts a list of timeline domains.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelineDomains -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelineEntities -->
+  <class name="TimelineEntities" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineEntities"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getEntities" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of entities
+ 
+ @return a list of entities]]>
+      </doc>
+    </method>
+    <method name="addEntity"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entity" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntity"/>
+      <doc>
+      <![CDATA[Add a single entity into the existing entity list
+ 
+ @param entity
+          a single entity]]>
+      </doc>
+    </method>
+    <method name="addEntities"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entities" type="java.util.List"/>
+      <doc>
+      <![CDATA[All a list of entities into the existing entity list
+ 
+ @param entities
+          a list of entities]]>
+      </doc>
+    </method>
+    <method name="setEntities"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entities" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the entity list to the given list of entities
+ 
+ @param entities
+          a list of entities]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The class that hosts a list of timeline entities.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelineEntities -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelineEntity -->
+  <class name="TimelineEntity" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="TimelineEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getEntityType" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the entity type
+ 
+ @return the entity type]]>
+      </doc>
+    </method>
+    <method name="setEntityType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityType" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the entity type
+ 
+ @param entityType
+          the entity type]]>
+      </doc>
+    </method>
+    <method name="getEntityId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the entity Id
+ 
+ @return the entity Id]]>
+      </doc>
+    </method>
+    <method name="setEntityId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityId" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the entity Id
+ 
+ @param entityId
+          the entity Id]]>
+      </doc>
+    </method>
+    <method name="getStartTime" return="java.lang.Long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the start time of the entity
+ 
+ @return the start time of the entity]]>
+      </doc>
+    </method>
+    <method name="setStartTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="startTime" type="java.lang.Long"/>
+      <doc>
+      <![CDATA[Set the start time of the entity
+ 
+ @param startTime
+          the start time of the entity]]>
+      </doc>
+    </method>
+    <method name="getEvents" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of events related to the entity
+ 
+ @return a list of events related to the entity]]>
+      </doc>
+    </method>
+    <method name="addEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEvent"/>
+      <doc>
+      <![CDATA[Add a single event related to the entity to the existing event list
+ 
+ @param event
+          a single event related to the entity]]>
+      </doc>
+    </method>
+    <method name="addEvents"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="events" type="java.util.List"/>
+      <doc>
+      <![CDATA[Add a list of events related to the entity to the existing event list
+ 
+ @param events
+          a list of events related to the entity]]>
+      </doc>
+    </method>
+    <method name="setEvents"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="events" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the event list to the given list of events related to the entity
+ 
+ @param events
+          events a list of events related to the entity]]>
+      </doc>
+    </method>
+    <method name="getRelatedEntities" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the related entities
+ 
+ @return the related entities]]>
+      </doc>
+    </method>
+    <method name="addRelatedEntity"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityType" type="java.lang.String"/>
+      <param name="entityId" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Add an entity to the existing related entity map
+ 
+ @param entityType
+          the entity type
+ @param entityId
+          the entity Id]]>
+      </doc>
+    </method>
+    <method name="addRelatedEntities"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="relatedEntities" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Add a map of related entities to the existing related entity map
+ 
+ @param relatedEntities
+          a map of related entities]]>
+      </doc>
+    </method>
+    <method name="setRelatedEntities"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="relatedEntities" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set the related entity map to the given map of related entities
+ 
+ @param relatedEntities
+          a map of related entities]]>
+      </doc>
+    </method>
+    <method name="getPrimaryFilters" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the primary filters
+ 
+ @return the primary filters]]>
+      </doc>
+    </method>
+    <method name="addPrimaryFilter"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Add a single piece of primary filter to the existing primary filter map
+ 
+ @param key
+          the primary filter key
+ @param value
+          the primary filter value]]>
+      </doc>
+    </method>
+    <method name="addPrimaryFilters"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="primaryFilters" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Add a map of primary filters to the existing primary filter map
+ 
+ @param primaryFilters
+          a map of primary filters]]>
+      </doc>
+    </method>
+    <method name="setPrimaryFilters"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="primaryFilters" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set the primary filter map to the given map of primary filters
+ 
+ @param primaryFilters
+          a map of primary filters]]>
+      </doc>
+    </method>
+    <method name="getOtherInfo" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the other information of the entity
+ 
+ @return the other information of the entity]]>
+      </doc>
+    </method>
+    <method name="addOtherInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Add one piece of other information of the entity to the existing other info
+ map
+ 
+ @param key
+          the other information key
+ @param value
+          the other information value]]>
+      </doc>
+    </method>
+    <method name="addOtherInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="otherInfo" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Add a map of other information of the entity to the existing other info map
+ 
+ @param otherInfo
+          a map of other information]]>
+      </doc>
+    </method>
+    <method name="setOtherInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="otherInfo" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set the other info map to the given map of other information
+ 
+ @param otherInfo
+          a map of other information]]>
+      </doc>
+    </method>
+    <method name="getDomainId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the ID of the domain that the entity is to be put
+ 
+ @return the domain ID]]>
+      </doc>
+    </method>
+    <method name="setDomainId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="domainId" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the ID of the domain that the entity is to be put
+ 
+ @param domainId
+          the name space ID]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntity"/>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The class that contains the the meta information of some conceptual entity
+ and its related events. The entity can be an application, an application
+ attempt, a container or whatever the user-defined object.
+ </p>
+ 
+ <p>
+ Primary filters will be used to index the entities in
+ <code>TimelineStore</code>, such that users should carefully choose the
+ information they want to store as the primary filters. The remaining can be
+ stored as other information.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelineEntity -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId -->
+  <class name="TimelineEntityGroupId" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="TimelineEntityGroupId"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="newInstance" return="org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="id" type="java.lang.String"/>
+    </method>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>ApplicationId</code> of the
+ <code>TimelineEntityGroupId</code>.
+
+ @return <code>ApplicationId</code> of the
+         <code>TimelineEntityGroupId</code>]]>
+      </doc>
+    </method>
+    <method name="setApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appID" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+    </method>
+    <method name="getTimelineEntityGroupId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <code>timelineEntityGroupId</code>.
+
+ @return <code>timelineEntityGroupId</code>]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="fromString" return="org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineEntityGroupIdStr" type="java.lang.String"/>
+    </method>
+    <field name="TIMELINE_ENTITY_GROUPID_STR_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<p><code>TimelineEntityGroupId</code> is an abstract way for
+ timeline service users to represent #a group of related timeline data.
+ For example, all entities that represents one data flow DAG execution
+ can be grouped into one timeline entity group. </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelineEvent -->
+  <class name="TimelineEvent" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="TimelineEvent"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the timestamp of the event
+ 
+ @return the timestamp of the event]]>
+      </doc>
+    </method>
+    <method name="setTimestamp"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timestamp" type="long"/>
+      <doc>
+      <![CDATA[Set the timestamp of the event
+ 
+ @param timestamp
+          the timestamp of the event]]>
+      </doc>
+    </method>
+    <method name="getEventType" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the event type
+ 
+ @return the event type]]>
+      </doc>
+    </method>
+    <method name="setEventType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventType" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the event type
+ 
+ @param eventType
+          the event type]]>
+      </doc>
+    </method>
+    <method name="getEventInfo" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Set the information of the event
+ 
+ @return the information of the event]]>
+      </doc>
+    </method>
+    <method name="addEventInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Add one piece of the information of the event to the existing information
+ map
+ 
+ @param key
+          the information key
+ @param value
+          the information value]]>
+      </doc>
+    </method>
+    <method name="addEventInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventInfo" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Add a map of the information of the event to the existing information map
+ 
+ @param eventInfo
+          a map of of the information of the event]]>
+      </doc>
+    </method>
+    <method name="setEventInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventInfo" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Set the information map to the given map of the information of the event
+ 
+ @param eventInfo
+          a map of of the information of the event]]>
+      </doc>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEvent"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[The class that contains the information of an event that is related to some
+ conceptual entity of an application. Users are free to define what the event
+ means, such as starting an application, getting allocated a container and
+ etc.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelineEvent -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelineEvents -->
+  <class name="TimelineEvents" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineEvents"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAllEvents" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of {@link EventsOfOneEntity} instances
+ 
+ @return a list of {@link EventsOfOneEntity} instances]]>
+      </doc>
+    </method>
+    <method name="addEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventsOfOneEntity" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity"/>
+      <doc>
+      <![CDATA[Add a single {@link EventsOfOneEntity} instance into the existing list
+ 
+ @param eventsOfOneEntity
+          a single {@link EventsOfOneEntity} instance]]>
+      </doc>
+    </method>
+    <method name="addEvents"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allEvents" type="java.util.List"/>
+      <doc>
+      <![CDATA[Add a list of {@link EventsOfOneEntity} instances into the existing list
+ 
+ @param allEvents
+          a list of {@link EventsOfOneEntity} instances]]>
+      </doc>
+    </method>
+    <method name="setEvents"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allEvents" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the list to the given list of {@link EventsOfOneEntity} instances
+ 
+ @param allEvents
+          a list of {@link EventsOfOneEntity} instances]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The class that hosts a list of events, which are categorized according to
+ their related entities.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelineEvents -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity -->
+  <class name="TimelineEvents.EventsOfOneEntity" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="EventsOfOneEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getEntityId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the entity Id
+ 
+ @return the entity Id]]>
+      </doc>
+    </method>
+    <method name="setEntityId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityId" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the entity Id
+ 
+ @param entityId
+          the entity Id]]>
+      </doc>
+    </method>
+    <method name="getEntityType" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the entity type
+ 
+ @return the entity type]]>
+      </doc>
+    </method>
+    <method name="setEntityType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityType" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the entity type
+ 
+ @param entityType
+          the entity type]]>
+      </doc>
+    </method>
+    <method name="getEvents" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of events
+ 
+ @return a list of events]]>
+      </doc>
+    </method>
+    <method name="addEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEvent"/>
+      <doc>
+      <![CDATA[Add a single event to the existing event list
+ 
+ @param event
+          a single event]]>
+      </doc>
+    </method>
+    <method name="addEvents"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="events" type="java.util.List"/>
+      <doc>
+      <![CDATA[Add a list of event to the existing event list
+ 
+ @param events
+          a list of events]]>
+      </doc>
+    </method>
+    <method name="setEvents"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="events" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the event list to the given list of events
+ 
+ @param events
+          a list of events]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The class that hosts a list of events that are only related to one entity.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelineEvents.EventsOfOneEntity -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelineHealth -->
+  <class name="TimelineHealth" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineHealth" type="org.apache.hadoop.yarn.api.records.timeline.TimelineHealth.TimelineHealthStatus, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="TimelineHealth"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getHealthStatus" return="org.apache.hadoop.yarn.api.records.timeline.TimelineHealth.TimelineHealthStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setHealthStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="healthStatus" type="org.apache.hadoop.yarn.api.records.timeline.TimelineHealth.TimelineHealthStatus"/>
+    </method>
+    <method name="setDiagnosticsInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="diagnosticsInfo" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[This class holds health information for ATS.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelineHealth -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse -->
+  <class name="TimelinePutResponse" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelinePutResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getErrors" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of {@link TimelinePutError} instances
+ 
+ @return a list of {@link TimelinePutError} instances]]>
+      </doc>
+    </method>
+    <method name="addError"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="error" type="org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError"/>
+      <doc>
+      <![CDATA[Add a single {@link TimelinePutError} instance into the existing list
+ 
+ @param error
+          a single {@link TimelinePutError} instance]]>
+      </doc>
+    </method>
+    <method name="addErrors"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="errors" type="java.util.List"/>
+      <doc>
+      <![CDATA[Add a list of {@link TimelinePutError} instances into the existing list
+ 
+ @param errors
+          a list of {@link TimelinePutError} instances]]>
+      </doc>
+    </method>
+    <method name="setErrors"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="errors" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the list to the given list of {@link TimelinePutError} instances
+ 
+ @param errors
+          a list of {@link TimelinePutError} instances]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A class that holds a list of put errors. This is the response returned when a
+ list of {@link TimelineEntity} objects is added to the timeline. If there are errors
+ in storing individual entity objects, they will be indicated in the list of
+ errors.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError -->
+  <class name="TimelinePutResponse.TimelinePutError" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelinePutError"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getEntityId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the entity Id
+ 
+ @return the entity Id]]>
+      </doc>
+    </method>
+    <method name="setEntityId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityId" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the entity Id
+ 
+ @param entityId
+          the entity Id]]>
+      </doc>
+    </method>
+    <method name="getEntityType" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the entity type
+ 
+ @return the entity type]]>
+      </doc>
+    </method>
+    <method name="setEntityType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityType" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the entity type
+ 
+ @param entityType
+          the entity type]]>
+      </doc>
+    </method>
+    <method name="getErrorCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the error code
+ 
+ @return an error code]]>
+      </doc>
+    </method>
+    <method name="setErrorCode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="errorCode" type="int"/>
+      <doc>
+      <![CDATA[Set the error code to the given error code
+ 
+ @param errorCode
+          an error code]]>
+      </doc>
+    </method>
+    <field name="NO_START_TIME" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Error code returned when no start time can be found when putting an
+ entity. This occurs when the entity does not already exist in the store
+ and it is put with no start time or events specified.]]>
+      </doc>
+    </field>
+    <field name="IO_EXCEPTION" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Error code returned if an IOException is encountered when putting an
+ entity.]]>
+      </doc>
+    </field>
+    <field name="SYSTEM_FILTER_CONFLICT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Error code returned if the user specifies the timeline system reserved
+ filter key]]>
+      </doc>
+    </field>
+    <field name="ACCESS_DENIED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Error code returned if the user is denied to access the timeline data]]>
+      </doc>
+    </field>
+    <field name="NO_DOMAIN" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Error code returned if the entity doesn't have an valid domain ID]]>
+      </doc>
+    </field>
+    <field name="FORBIDDEN_RELATION" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Error code returned if the user is denied to relate the entity to another
+ one in different domain]]>
+      </doc>
+    </field>
+    <field name="EXPIRED_ENTITY" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Error code returned if the entity start time is before the eviction
+ period of old data.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A class that holds the error code for one entity.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse.TimelinePutError -->
+</package>
+<package name="org.apache.hadoop.yarn.api.records.timelineservice">
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.ApplicationAttemptEntity -->
+  <class name="ApplicationAttemptEntity" extends="org.apache.hadoop.yarn.api.records.timelineservice.HierarchicalTimelineEntity"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationAttemptEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ApplicationAttemptEntity" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This entity represents an application attempt.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.ApplicationAttemptEntity -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity -->
+  <class name="ApplicationEntity" extends="org.apache.hadoop.yarn.api.records.timelineservice.HierarchicalTimelineEntity"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ApplicationEntity" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getQueue" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setQueue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queue" type="java.lang.String"/>
+    </method>
+    <method name="isApplicationEntity" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="te" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"/>
+      <doc>
+      <![CDATA[Checks if the input TimelineEntity object is an ApplicationEntity.
+
+ @param te TimelineEntity object.
+ @return true if input is an ApplicationEntity, false otherwise]]>
+      </doc>
+    </method>
+    <method name="getApplicationEvent" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="te" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"/>
+      <param name="eventId" type="java.lang.String"/>
+      <doc>
+      <![CDATA[@param te TimelineEntity object.
+ @param eventId event with this id needs to be fetched
+ @return TimelineEvent if TimelineEntity contains the desired event.]]>
+      </doc>
+    </method>
+    <field name="QUEUE_INFO_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This entity represents an application.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.ApplicationEntity -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.ClusterEntity -->
+  <class name="ClusterEntity" extends="org.apache.hadoop.yarn.api.records.timelineservice.HierarchicalTimelineEntity"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClusterEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ClusterEntity" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This entity represents a YARN cluster.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.ClusterEntity -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.ContainerEntity -->
+  <class name="ContainerEntity" extends="org.apache.hadoop.yarn.api.records.timelineservice.HierarchicalTimelineEntity"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerEntity" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This entity represents a container belonging to an application attempt.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.ContainerEntity -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity -->
+  <class name="FlowActivityEntity" extends="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FlowActivityEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FlowActivityEntity" type="java.lang.String, long, java.lang.String, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FlowActivityEntity" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entity" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"/>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+      <doc>
+      <![CDATA[Reuse the base class equals method.]]>
+      </doc>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Reuse the base class hashCode method.]]>
+      </doc>
+    </method>
+    <method name="getCluster" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setCluster"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cluster" type="java.lang.String"/>
+    </method>
+    <method name="getDate" return="java.util.Date"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setDate"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="time" type="long"/>
+    </method>
+    <method name="getUser" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setUser"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+    </method>
+    <method name="getFlowName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setFlowName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="flowName" type="java.lang.String"/>
+    </method>
+    <method name="addFlowRun"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="run" type="org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity"/>
+    </method>
+    <method name="addFlowRuns"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="runs" type="java.util.Collection"/>
+    </method>
+    <method name="getFlowRuns" return="java.util.NavigableSet"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNumberOfRuns" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="CLUSTER_INFO_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DATE_INFO_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="USER_INFO_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FLOW_NAME_INFO_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Entity that represents a record for flow activity. It's essentially a
+ container entity for flow runs with limited information.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity -->
+  <class name="FlowRunEntity" extends="org.apache.hadoop.yarn.api.records.timelineservice.HierarchicalTimelineEntity"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="FlowRunEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="FlowRunEntity" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setUser"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setName"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="getVersion" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setVersion"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="version" type="java.lang.String"/>
+    </method>
+    <method name="getRunId" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setRunId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="runId" type="long"/>
+    </method>
+    <method name="getStartTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setStartTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="startTime" type="long"/>
+    </method>
+    <method name="getMaxEndTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setMaxEndTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="endTime" type="long"/>
+    </method>
+    <field name="USER_INFO_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FLOW_NAME_INFO_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FLOW_VERSION_INFO_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FLOW_RUN_ID_INFO_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FLOW_RUN_END_TIME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This entity represents a flow run.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.HierarchicalTimelineEntity -->
+  <class name="HierarchicalTimelineEntity" extends="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getParent" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setParent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="parent" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier"/>
+    </method>
+    <method name="setParent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="java.lang.String"/>
+      <param name="id" type="java.lang.String"/>
+    </method>
+    <method name="getChildren" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setChildren"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="children" type="java.util.Set"/>
+    </method>
+    <method name="addChildren"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="children" type="java.util.Set"/>
+    </method>
+    <method name="addChild"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="child" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier"/>
+    </method>
+    <method name="addChild"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="java.lang.String"/>
+      <param name="id" type="java.lang.String"/>
+    </method>
+    <field name="PARENT_INFO_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CHILDREN_INFO_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class extends timeline entity and defines parent-child relationships
+ with other entities.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.HierarchicalTimelineEntity -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.QueueEntity -->
+  <class name="QueueEntity" extends="org.apache.hadoop.yarn.api.records.timelineservice.HierarchicalTimelineEntity"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="QueueEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="QueueEntity" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This entity represents a queue.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.QueueEntity -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.SubApplicationEntity -->
+  <class name="SubApplicationEntity" extends="org.apache.hadoop.yarn.api.records.timelineservice.HierarchicalTimelineEntity"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SubApplicationEntity" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="isSubApplicationEntity" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="te" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"/>
+      <doc>
+      <![CDATA[Checks if the input TimelineEntity object is an SubApplicationEntity.
+
+ @param te TimelineEntity object.
+ @return true if input is an SubApplicationEntity, false otherwise]]>
+      </doc>
+    </method>
+    <method name="setApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="java.lang.String"/>
+    </method>
+    <field name="YARN_APPLICATION_ID" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This entity represents a user defined entities to be stored under sub
+ application table.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.SubApplicationEntity -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.TimelineDomain -->
+  <class name="TimelineDomain" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineDomain"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the domain ID.
+ @return the domain ID]]>
+      </doc>
+    </method>
+    <method name="setId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="id" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the domain ID.
+ @param id the domain ID]]>
+      </doc>
+    </method>
+    <method name="getDescription" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the domain description.
+ @return the domain description]]>
+      </doc>
+    </method>
+    <method name="setDescription"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="description" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the domain description.
+ @param description the domain description]]>
+      </doc>
+    </method>
+    <method name="getOwner" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the domain owner.
+ @return the domain owner]]>
+      </doc>
+    </method>
+    <method name="setOwner"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="owner" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the domain owner. The user doesn't need to set it, which will
+ automatically set to the user who puts the domain.
+ @param owner the domain owner]]>
+      </doc>
+    </method>
+    <method name="getReaders" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the reader (and/or reader group) list string.
+ @return the reader (and/or reader group) list string]]>
+      </doc>
+    </method>
+    <method name="setReaders"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="readers" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the reader (and/or reader group) list string.
+ @param readers the reader (and/or reader group) list string]]>
+      </doc>
+    </method>
+    <method name="getWriters" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the writer (and/or writer group) list string.
+ @return the writer (and/or writer group) list string]]>
+      </doc>
+    </method>
+    <method name="setWriters"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="writers" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the writer (and/or writer group) list string.
+ @param writers the writer (and/or writer group) list string]]>
+      </doc>
+    </method>
+    <method name="getCreatedTime" return="java.lang.Long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the created time of the domain.
+ @return the created time of the domain]]>
+      </doc>
+    </method>
+    <method name="setCreatedTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="createdTime" type="java.lang.Long"/>
+      <doc>
+      <![CDATA[Set the created time of the domain.
+ @param createdTime the created time of the domain]]>
+      </doc>
+    </method>
+    <method name="getModifiedTime" return="java.lang.Long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the modified time of the domain.
+ @return the modified time of the domain]]>
+      </doc>
+    </method>
+    <method name="setModifiedTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="modifiedTime" type="java.lang.Long"/>
+      <doc>
+      <![CDATA[Set the modified time of the domain.
+ @param modifiedTime the modified time of the domain]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ This class contains the information about a timeline service domain, which is
+ used to a user to host a number of timeline entities, isolating them from
+ others'. The user can also define the reader and writer users/groups for
+ the domain, which is used to control the access to its entities.
+ </p>
+ <p>
+ The reader and writer users/groups pattern that the user can supply is the
+ same as what <code>AccessControlList</code> takes.
+ </p>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.TimelineDomain -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities -->
+  <class name="TimelineEntities" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineEntities"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getEntities" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setEntities"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineEntities" type="java.util.List"/>
+    </method>
+    <method name="addEntities"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineEntities" type="java.util.List"/>
+    </method>
+    <method name="addEntity"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entity" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"/>
+    </method>
+    <doc>
+    <![CDATA[This class hosts a set of timeline entities.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntities -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity -->
+  <class name="TimelineEntity" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="TimelineEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="TimelineEntity" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[<p>
+ The constuctor is used to construct a proxy {@link TimelineEntity} or its
+ subclass object from the real entity object that carries information.
+ </p>
+
+ <p>
+ It is usually used in the case where we want to recover class polymorphism
+ after deserializing the entity from its JSON form.
+ </p>
+ @param entity the real entity that carries information]]>
+      </doc>
+    </constructor>
+    <constructor name="TimelineEntity" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getType" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="java.lang.String"/>
+    </method>
+    <method name="getId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="id" type="java.lang.String"/>
+    </method>
+    <method name="getIdentifier" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setIdentifier"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityIdentifier" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity.Identifier"/>
+    </method>
+    <method name="getInfo" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityInfos" type="java.util.Map"/>
+    </method>
+    <method name="addInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityInfos" type="java.util.Map"/>
+    </method>
+    <method name="addInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="java.lang.Object"/>
+    </method>
+    <method name="getConfigs" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setConfigs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityConfigs" type="java.util.Map"/>
+    </method>
+    <method name="addConfigs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityConfigs" type="java.util.Map"/>
+    </method>
+    <method name="addConfig"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="java.lang.String"/>
+    </method>
+    <method name="getMetrics" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setMetrics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityMetrics" type="java.util.Set"/>
+    </method>
+    <method name="addMetrics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityMetrics" type="java.util.Set"/>
+    </method>
+    <method name="addMetric"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="metric" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric"/>
+    </method>
+    <method name="getEvents" return="java.util.NavigableSet"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setEvents"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityEvents" type="java.util.NavigableSet"/>
+    </method>
+    <method name="addEvents"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityEvents" type="java.util.Set"/>
+    </method>
+    <method name="addEvent"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent"/>
+    </method>
+    <method name="getIsRelatedToEntities" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setIsRelatedToEntities"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="isRelatedTo" type="java.util.Map"/>
+    </method>
+    <method name="addIsRelatedToEntities"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="isRelatedTo" type="java.util.Map"/>
+    </method>
+    <method name="addIsRelatedToEntity"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="java.lang.String"/>
+      <param name="id" type="java.lang.String"/>
+    </method>
+    <method name="getRelatesToEntities" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="addRelatesToEntities"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="relatesTo" type="java.util.Map"/>
+    </method>
+    <method name="addRelatesToEntity"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="java.lang.String"/>
+      <param name="id" type="java.lang.String"/>
+    </method>
+    <method name="setRelatesToEntities"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="relatesTo" type="java.util.Map"/>
+    </method>
+    <method name="getCreatedTime" return="java.lang.Long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setCreatedTime"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="createdTs" type="java.lang.Long"/>
+    </method>
+    <method name="setUID"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uidKey" type="java.lang.String"/>
+      <param name="uId" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set UID in info which will be then used for query by UI.
+ @param uidKey key for UID in info.
+ @param uId UID to be set for the key.]]>
+      </doc>
+    </method>
+    <method name="isValid" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"/>
+    </method>
+    <method name="getReal" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getIdPrefix" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setIdPrefix"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entityIdPrefix" type="long"/>
+      <doc>
+      <![CDATA[Sets idPrefix for an entity.
+ <p>
+ <b>Note</b>: Entities will be stored in the order of idPrefix specified.
+ If users decide to set idPrefix for an entity, they <b>MUST</b> provide
+ the same prefix for every update of this entity.
+ </p>
+ Example: <blockquote><pre>
+ TimelineEntity entity = new TimelineEntity();
+ entity.setIdPrefix(value);
+ </pre></blockquote>
+ Users can use {@link TimelineServiceHelper#invertLong(long)} to invert
+ the prefix if necessary.
+
+ @param entityIdPrefix prefix for an entity.]]>
+      </doc>
+    </method>
+    <field name="SYSTEM_INFO_KEY_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ENTITY_PREFIX" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[The basic timeline entity data structure for timeline service v2. Timeline
+ entity objects are not thread safe and should not be accessed concurrently.
+ All collection members will be initialized into empty collections. Two
+ timeline entities are equal iff. their type and id are identical.
+
+ All non-primitive type, non-collection members will be initialized into null.
+ User should set the type and id of a timeline entity to make it valid (can be
+ checked by using the {@link #isValid()} method). Callers to the getters
+ should perform null checks for non-primitive type, non-collection members.
+
+ Callers are recommended not to alter the returned collection objects from the
+ getters.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType -->
+  <class name="TimelineEntityType" extends="java.lang.Enum"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="values" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType[]"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="valueOf" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+    </method>
+    <method name="isParent" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType"/>
+      <doc>
+      <![CDATA[Whether the input type can be a parent of this entity.
+
+ @param type entity type.
+ @return true, if this entity type is parent of passed entity type, false
+     otherwise.]]>
+      </doc>
+    </method>
+    <method name="isChild" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType"/>
+      <doc>
+      <![CDATA[Whether the input type can be a child of this entity.
+
+ @param type entity type.
+ @return true, if this entity type is child of passed entity type, false
+     otherwise.]]>
+      </doc>
+    </method>
+    <method name="matches" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="typeString" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Whether the type of this entity matches the type indicated by the input
+ argument.
+
+ @param typeString entity type represented as a string.
+ @return true, if string representation of this entity type matches the
+     entity type passed.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Defines type of entity.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntityType -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent -->
+  <class name="TimelineEvent" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Comparable"/>
+    <constructor name="TimelineEvent"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventId" type="java.lang.String"/>
+    </method>
+    <method name="getInfo" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="infos" type="java.util.Map"/>
+    </method>
+    <method name="addInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="infos" type="java.util.Map"/>
+    </method>
+    <method name="addInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="java.lang.String"/>
+      <param name="value" type="java.lang.Object"/>
+    </method>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setTimestamp"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ts" type="long"/>
+    </method>
+    <method name="isValid" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="compareTo" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent"/>
+    </method>
+    <field name="INVALID_TIMESTAMP" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class contains the information of an event that belongs to an entity.
+ Users are free to define what the event means, such as starting an
+ application, container being allocated, etc.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.TimelineEvent -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric -->
+  <class name="TimelineMetric" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineMetric"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="TimelineMetric" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getType" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="metricType" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric.Type"/>
+    </method>
+    <method name="getId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="metricId" type="java.lang.String"/>
+    </method>
+    <method name="getRealtimeAggregationOp" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetricOperation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the real time aggregation operation of this metric.
+
+ @return Real time aggregation operation]]>
+      </doc>
+    </method>
+    <method name="setRealtimeAggregationOp"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="op" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetricOperation"/>
+      <doc>
+      <![CDATA[Set the real time aggregation operation of this metric.
+
+ @param op A timeline metric operation that the metric should perform on
+           real time aggregations]]>
+      </doc>
+    </method>
+    <method name="getValues" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setValues"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="vals" type="java.util.Map"/>
+    </method>
+    <method name="addValues"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="vals" type="java.util.Map"/>
+    </method>
+    <method name="addValue"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timestamp" type="long"/>
+      <param name="value" type="java.lang.Number"/>
+    </method>
+    <method name="isValid" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLatestSingleValueMetric" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="metric" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric"/>
+      <doc>
+      <![CDATA[Get the latest timeline metric as single value type.
+
+ @param metric Incoming timeline metric
+ @return The latest metric in the incoming metric]]>
+      </doc>
+    </method>
+    <method name="getSingleDataTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get single data timestamp of the metric.
+
+ @return the single data timestamp]]>
+      </doc>
+    </method>
+    <method name="getSingleDataValue" return="java.lang.Number"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get single data value of the metric.
+
+ @return the single data value]]>
+      </doc>
+    </method>
+    <method name="aggregateTo" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="incomingMetric" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric"/>
+      <param name="baseAggregatedMetric" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric"/>
+      <doc>
+      <![CDATA[Aggregate an incoming metric to the base aggregated metric with the given
+ operation state in a stateless fashion. The assumption here is
+ baseAggregatedMetric and latestMetric should be single value data if not
+ null.
+
+ @param incomingMetric Incoming timeline metric to aggregate
+ @param baseAggregatedMetric Base timeline metric
+ @return Result metric after aggregation]]>
+      </doc>
+    </method>
+    <method name="aggregateTo" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="incomingMetric" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric"/>
+      <param name="baseAggregatedMetric" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric"/>
+      <param name="state" type="java.util.Map"/>
+      <doc>
+      <![CDATA[Aggregate an incoming metric to the base aggregated metric with the given
+ operation state. The assumption here is baseAggregatedMetric and
+ latestMetric should be single value data if not null.
+
+ @param incomingMetric Incoming timeline metric to aggregate
+ @param baseAggregatedMetric Base timeline metric
+ @param state Operation state
+ @return Result metric after aggregation]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This class contains the information of a metric that is related to some
+ entity. Metric can either be a time series or single value.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.TimelineMetric -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse -->
+  <class name="TimelineWriteResponse" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineWriteResponse"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getErrors" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a list of {@link TimelineWriteError} instances.
+
+ @return a list of {@link TimelineWriteError} instances]]>
+      </doc>
+    </method>
+    <method name="addError"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="error" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse.TimelineWriteError"/>
+      <doc>
+      <![CDATA[Add a single {@link TimelineWriteError} instance into the existing list.
+
+ @param error
+          a single {@link TimelineWriteError} instance]]>
+      </doc>
+    </method>
+    <method name="addErrors"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="writeErrors" type="java.util.List"/>
+      <doc>
+      <![CDATA[Add a list of {@link TimelineWriteError} instances into the existing list.
+
+ @param writeErrors
+          a list of {@link TimelineWriteError} instances]]>
+      </doc>
+    </method>
+    <method name="setErrors"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="writeErrors" type="java.util.List"/>
+      <doc>
+      <![CDATA[Set the list to the given list of {@link TimelineWriteError} instances.
+
+ @param writeErrors
+          a list of {@link TimelineWriteError} instances]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A class that holds a list of put errors. This is the response returned when a
+ list of {@link TimelineEntity} objects is added to the timeline. If there are
+ errors in storing individual entity objects, they will be indicated in the
+ list of errors.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse.TimelineWriteError -->
+  <class name="TimelineWriteResponse.TimelineWriteError" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineWriteError"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getEntityId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the entity Id.
+
+ @return the entity Id]]>
+      </doc>
+    </method>
+    <method name="setEntityId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="id" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the entity Id.
+
+ @param id the entity Id.]]>
+      </doc>
+    </method>
+    <method name="getEntityType" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the entity type.
+
+ @return the entity type]]>
+      </doc>
+    </method>
+    <method name="setEntityType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="type" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Set the entity type.
+
+ @param type the entity type.]]>
+      </doc>
+    </method>
+    <method name="getErrorCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the error code.
+
+ @return an error code]]>
+      </doc>
+    </method>
+    <method name="setErrorCode"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="code" type="int"/>
+      <doc>
+      <![CDATA[Set the error code to the given error code.
+
+ @param code an error code.]]>
+      </doc>
+    </method>
+    <field name="IO_EXCEPTION" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Error code returned if an IOException is encountered when storing an
+ entity.]]>
+      </doc>
+    </field>
+    <doc>
+    <![CDATA[A class that holds the error code for one entity.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.TimelineWriteResponse.TimelineWriteError -->
+  <!-- start class org.apache.hadoop.yarn.api.records.timelineservice.UserEntity -->
+  <class name="UserEntity" extends="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UserEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="UserEntity" type="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This entity represents a user.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.records.timelineservice.UserEntity -->
+</package>
+<package name="org.apache.hadoop.yarn.api.resource">
+  <!-- start class org.apache.hadoop.yarn.api.resource.PlacementConstraint -->
+  <class name="PlacementConstraint" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="PlacementConstraint" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getConstraintExpr" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the constraint expression of the placement constraint.
+
+ @return the constraint expression]]>
+      </doc>
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[{@code PlacementConstraint} represents a placement constraint for a resource
+ allocation.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.resource.PlacementConstraint -->
+  <!-- start class org.apache.hadoop.yarn.api.resource.PlacementConstraints -->
+  <class name="PlacementConstraints" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="targetIn" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="java.lang.String"/>
+      <param name="targetExpressions" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression[]"/>
+      <doc>
+      <![CDATA[Creates a constraint that requires allocations to be placed on nodes that
+ satisfy all target expressions within the given scope (e.g., node or rack).
+
+ For example, {@code targetIn(RACK, allocationTag("hbase-m"))}, allows
+ allocations on nodes that belong to a rack that has at least one tag with
+ value "hbase-m".
+
+ @param scope the scope within which the target expressions should be
+          satisfied
+ @param targetExpressions the expressions that need to be satisfied within
+          the scope
+ @return the resulting placement constraint]]>
+      </doc>
+    </method>
+    <method name="targetNotIn" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="java.lang.String"/>
+      <param name="targetExpressions" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression[]"/>
+      <doc>
+      <![CDATA[Creates a constraint that requires allocations to be placed on nodes that
+ belong to a scope (e.g., node or rack) that does not satisfy any of the
+ target expressions.
+
+ @param scope the scope within which the target expressions should not be
+          true
+ @param targetExpressions the expressions that need to not be true within
+          the scope
+ @return the resulting placement constraint]]>
+      </doc>
+    </method>
+    <method name="targetNodeAttribute" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="java.lang.String"/>
+      <param name="opCode" type="org.apache.hadoop.yarn.api.records.NodeAttributeOpCode"/>
+      <param name="targetExpressions" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression[]"/>
+      <doc>
+      <![CDATA[Creates a constraint that requires allocations to be placed on nodes that
+ belong to a scope (e.g., node or rack) that satisfy any of the
+ target expressions based on node attribute op code.
+
+ @param scope the scope within which the target expressions should not be
+          true
+ @param opCode Node Attribute code which could be equals, not equals.
+ @param targetExpressions the expressions that need to not be true within
+          the scope
+ @return the resulting placement constraint]]>
+      </doc>
+    </method>
+    <method name="cardinality" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="java.lang.String"/>
+      <param name="minCardinality" type="int"/>
+      <param name="maxCardinality" type="int"/>
+      <param name="allocationTags" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Creates a constraint that restricts the number of allocations within a
+ given scope (e.g., node or rack).
+
+ For example, {@code cardinality(NODE, 3, 10, "zk")} is satisfied on nodes
+ where there are no less than 3 allocations with tag "zk" and no more than
+ 10.
+
+ @param scope the scope of the constraint
+ @param minCardinality determines the minimum number of allocations within
+          the scope
+ @param maxCardinality determines the maximum number of allocations within
+          the scope
+ @param allocationTags the constraint targets allocations with these tags
+ @return the resulting placement constraint]]>
+      </doc>
+    </method>
+    <method name="cardinality" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="java.lang.String"/>
+      <param name="namespace" type="java.lang.String"/>
+      <param name="minCardinality" type="int"/>
+      <param name="maxCardinality" type="int"/>
+      <param name="allocationTags" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Similar to {@link #cardinality(String, int, int, String...)}, but let you
+ attach a namespace to the given allocation tags.
+
+ @param scope the scope of the constraint
+ @param namespace the namespace of the allocation tags
+ @param minCardinality determines the minimum number of allocations within
+                       the scope
+ @param maxCardinality determines the maximum number of allocations within
+                       the scope
+ @param allocationTags allocation tags
+ @return the resulting placement constraint]]>
+      </doc>
+    </method>
+    <method name="minCardinality" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="java.lang.String"/>
+      <param name="minCardinality" type="int"/>
+      <param name="allocationTags" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Similar to {@link #cardinality(String, int, int, String...)}, but
+ determines only the minimum cardinality (the maximum cardinality is
+ unbound).
+
+ @param scope the scope of the constraint
+ @param minCardinality determines the minimum number of allocations within
+          the scope
+ @param allocationTags the constraint targets allocations with these tags
+ @return the resulting placement constraint]]>
+      </doc>
+    </method>
+    <method name="minCardinality" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="java.lang.String"/>
+      <param name="namespace" type="java.lang.String"/>
+      <param name="minCardinality" type="int"/>
+      <param name="allocationTags" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Similar to {@link #minCardinality(String, int, String...)}, but let you
+ attach a namespace to the allocation tags.
+
+ @param scope the scope of the constraint
+ @param namespace the namespace of these tags
+ @param minCardinality determines the minimum number of allocations within
+                       the scope
+ @param allocationTags the constraint targets allocations with these tags
+ @return the resulting placement constraint]]>
+      </doc>
+    </method>
+    <method name="maxCardinality" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="java.lang.String"/>
+      <param name="maxCardinality" type="int"/>
+      <param name="allocationTags" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Similar to {@link #cardinality(String, int, int, String...)}, but
+ determines only the maximum cardinality (the minimum cardinality is 0).
+
+ @param scope the scope of the constraint
+ @param maxCardinality determines the maximum number of allocations within
+          the scope
+ @param allocationTags the constraint targets allocations with these tags
+ @return the resulting placement constraint]]>
+      </doc>
+    </method>
+    <method name="maxCardinality" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="java.lang.String"/>
+      <param name="tagNamespace" type="java.lang.String"/>
+      <param name="maxCardinality" type="int"/>
+      <param name="allocationTags" type="java.lang.String[]"/>
+      <doc>
+      <![CDATA[Similar to {@link #maxCardinality(String, int, String...)}, but let you
+ specify a namespace for the tags, see supported namespaces in
+ {@link AllocationTagNamespaceType}.
+
+ @param scope the scope of the constraint
+ @param tagNamespace the namespace of these tags
+ @param maxCardinality determines the maximum number of allocations within
+          the scope
+ @param allocationTags allocation tags
+ @return the resulting placement constraint]]>
+      </doc>
+    </method>
+    <method name="targetCardinality" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="scope" type="java.lang.String"/>
+      <param name="minCardinality" type="int"/>
+      <param name="maxCardinality" type="int"/>
+      <param name="targetExpressions" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint.TargetExpression[]"/>
+      <doc>
+      <![CDATA[This constraint generalizes the cardinality and target constraints.
+
+ Consider a set of nodes N that belongs to the scope specified in the
+ constraint. If the target expressions are satisfied at least minCardinality
+ times and at most maxCardinality times in the node set N, then the
+ constraint is satisfied.
+
+ For example, {@code targetCardinality(RACK, 2, 10, allocationTag("zk"))},
+ requires an allocation to be placed within a rack that has at least 2 and
+ at most 10 other allocations with tag "zk".
+
+ @param scope the scope of the constraint
+ @param minCardinality the minimum number of times the target expressions
+          have to be satisfied with the given scope
+ @param maxCardinality the maximum number of times the target expressions
+          have to be satisfied with the given scope
+ @param targetExpressions the target expressions
+ @return the resulting placement constraint]]>
+      </doc>
+    </method>
+    <method name="and" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.And"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="children" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint[]"/>
+      <doc>
+      <![CDATA[A conjunction of constraints.
+
+ @param children the children constraints that should all be satisfied
+ @return the resulting placement constraint]]>
+      </doc>
+    </method>
+    <method name="or" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.Or"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="children" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint[]"/>
+      <doc>
+      <![CDATA[A disjunction of constraints.
+
+ @param children the children constraints, one of which should be satisfied
+ @return the resulting placement constraint]]>
+      </doc>
+    </method>
+    <method name="delayedOr" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.DelayedOr"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="children" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint[]"/>
+      <doc>
+      <![CDATA[Creates a composite constraint that includes a list of timed placement
+ constraints. The scheduler should try to satisfy first the first timed
+ child constraint within the specified time window. If this is not possible,
+ it should attempt to satisfy the second, and so on.
+
+ @param children the timed children constraints
+ @return the resulting composite constraint]]>
+      </doc>
+    </method>
+    <method name="timedClockConstraint" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="constraint" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"/>
+      <param name="delay" type="long"/>
+      <param name="timeUnit" type="java.util.concurrent.TimeUnit"/>
+      <doc>
+      <![CDATA[Creates a placement constraint that has to be satisfied within a time
+ window.
+
+ @param constraint the placement constraint
+ @param delay the length of the time window within which the constraint has
+          to be satisfied
+ @param timeUnit the unit of time of the time window
+ @return the resulting timed placement constraint]]>
+      </doc>
+    </method>
+    <method name="timedOpportunitiesConstraint" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.TimedPlacementConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="constraint" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"/>
+      <param name="delay" type="long"/>
+      <doc>
+      <![CDATA[Creates a placement constraint that has to be satisfied within a number of
+ placement opportunities (invocations of the scheduler).
+
+ @param constraint the placement constraint
+ @param delay the number of scheduling opportunities within which the
+          constraint has to be satisfied
+ @return the resulting timed placement constraint]]>
+      </doc>
+    </method>
+    <method name="build" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="constraintExpr" type="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"/>
+      <doc>
+      <![CDATA[Creates a {@link PlacementConstraint} given a constraint expression.
+
+ @param constraintExpr the constraint expression
+ @return the placement constraint]]>
+      </doc>
+    </method>
+    <field name="NODE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RACK" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NODE_PARTITION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class contains various static methods for the applications to create
+ placement constraints (see also {@link PlacementConstraint}).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.api.resource.PlacementConstraints -->
+</package>
+<package name="org.apache.hadoop.yarn.conf">
+  <!-- start class org.apache.hadoop.yarn.conf.YarnConfiguration -->
+  <class name="YarnConfiguration" extends="org.apache.hadoop.conf.Configuration"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnConfiguration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="YarnConfiguration" type="org.apache.hadoop.conf.Configuration"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="isAclEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getSocketAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="defaultAddress" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+      <doc>
+      <![CDATA[Get the socket address for <code>name</code> property as a
+ <code>InetSocketAddress</code>. On an HA cluster,
+ this fetches the address corresponding to the RM identified by
+ {@link #RM_HA_ID}.
+ @param name property name.
+ @param defaultAddress the default value
+ @param defaultPort the default port
+ @return InetSocketAddress]]>
+      </doc>
+    </method>
+    <method name="updateConnectAddr" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <param name="addr" type="java.net.InetSocketAddress"/>
+    </method>
+    <method name="useHttps" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="shouldRMFailFast" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="isDistSchedulingEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="isOpportunisticContainerAllocationEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="timelineServiceEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Returns whether the timeline service is enabled via configuration.
+
+ @param conf the configuration
+ @return whether the timeline service is enabled.]]>
+      </doc>
+    </method>
+    <method name="getTimelineServiceVersion" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Returns the timeline service version. It does not check whether the
+ timeline service itself is enabled.
+
+ @param conf the configuration
+ @return the timeline service version as a float.]]>
+      </doc>
+    </method>
+    <method name="timelineServiceV2Enabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Returns whether the timeline service v.2 is enabled via configuration.
+
+ @param conf the configuration
+ @return whether the timeline service v.2 is enabled. V.2 refers to a
+ version greater than or equal to 2 but smaller than 3.]]>
+      </doc>
+    </method>
+    <method name="timelineServiceV1Enabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Returns whether the timeline service v.1 is enabled via configuration.
+
+ @param conf the configuration
+ @return whether the timeline service v.1 is enabled. V.1 refers to a
+ version greater than or equal to 1 but smaller than 2.]]>
+      </doc>
+    </method>
+    <method name="timelineServiceV15Enabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Returns whether the timeline service v.1.5 is enabled via configuration.
+
+ @param conf the configuration
+ @return whether the timeline service v.1.5 is enabled. V.1.5 refers to a
+ version equal to 1.5.]]>
+      </doc>
+    </method>
+    <method name="systemMetricsPublisherEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Returns whether the system publisher is enabled.
+
+ @param conf the configuration
+ @return whether the system publisher is enabled.]]>
+      </doc>
+    </method>
+    <method name="numaAwarenessEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Returns whether the NUMA awareness is enabled.
+
+ @param conf the configuration
+ @return whether the NUMA awareness is enabled.]]>
+      </doc>
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <field name="DR_CONFIGURATION_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CS_CONFIGURATION_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_POLICY_CONFIGURATION_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_SITE_CONFIGURATION_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CORE_SITE_CONFIGURATION_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RESOURCE_TYPES_CONFIGURATION_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NODE_RESOURCES_CONFIGURATION_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_CONFIGURATION_FILES" type="java.util.List"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONFIGURATION_FILES" type="java.util.List"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="APPLICATION_MAX_TAGS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="APPLICATION_MAX_TAG_LENGTH" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RESOURCE_TYPES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_RESOURCES_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEBUG_NM_DELETE_DELAY_SEC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Delay before deleting resource to ease debugging of NM issues]]>
+      </doc>
+    </field>
+    <field name="NM_LOG_CONTAINER_DEBUG_INFO" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_LOG_CONTAINER_DEBUG_INFO" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IPC_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IPC_CLIENT_FACTORY_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Factory to create client IPC classes.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_IPC_CLIENT_FACTORY_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IPC_SERVER_FACTORY_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Factory to create server IPC classes.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_IPC_SERVER_FACTORY_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IPC_RECORD_FACTORY_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Factory to create serializable records.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_IPC_RECORD_FACTORY_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IPC_RPC_IMPL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[RPC class implementation]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_IPC_RPC_IMPL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_CLUSTER_ID" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_CLUSTER_ID" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_HOSTNAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_EPOCH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_EPOCH" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_EPOCH_RANGE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The epoch range before wrap around. 0 disables wrap around]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_EPOCH_RANGE" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The address of the applications manager interface in the RM.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_APPLICATION_MAX_TAGS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Max number of application tags.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_APPLICATION_MAX_TAGS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_APPLICATION_MAX_TAG_LENGTH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Max length of each application tag.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_APPLICATION_MAX_TAG_LENGTH" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_APPLICATION_MASTER_SERVICE_PROCESSORS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_AUTO_UPDATE_CONTAINERS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_AUTO_UPDATE_CONTAINERS" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_BIND_HOST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The actual bind address for the RM.]]>
+      </doc>
+    </field>
+    <field name="RM_CLIENT_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of threads used to handle applications manager requests.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_CLIENT_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_AMLAUNCHER_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of threads used to launch/cleanup AM.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_AMLAUNCHER_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NODEMANAGER_CONNECT_RETRIES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Retry times to connect with NM.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_NODEMANAGER_CONNECT_RETRIES" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_PRINCIPAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The Kerberos principal for the resource manager.]]>
+      </doc>
+    </field>
+    <field name="RM_SCHEDULER_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The address of the scheduler interface.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_SCHEDULER_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_SCHEDULER_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDULER_MINIMUM_ALLOCATION_MB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Minimum request grant-able by the RM scheduler.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDULER_MAXIMUM_ALLOCATION_MB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Maximum request grant-able by the RM scheduler.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDULER_CLIENT_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of threads to handle scheduler interface.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_SCHEDULER_CLIENT_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[If the port should be included or not in the node name. The node name
+ is used by the scheduler for resource requests allocation location 
+ matching. Typically this is just the hostname, using the port is needed
+ when using minicluster and specific NM are required.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="QUEUE_PLACEMENT_RULES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Configured scheduler queue placement rules.]]>
+      </doc>
+    </field>
+    <field name="USER_GROUP_PLACEMENT_RULE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[UserGroupMappingPlacementRule configuration string.]]>
+      </doc>
+    </field>
+    <field name="APP_NAME_PLACEMENT_RULE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_WEBAPP_UI_ACTIONS_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable Resource Manager webapp ui actions]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_WEBAPP_UI_ACTIONS_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_RESERVATION_SYSTEM_ENABLE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether the RM should enable Reservation System]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_RESERVATION_SYSTEM_ENABLE" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_RESERVATION_SYSTEM_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The class to use as the Reservation System.]]>
+      </doc>
+    </field>
+    <field name="RM_RESERVATION_SYSTEM_PLAN_FOLLOWER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The PlanFollower for the Reservation System.]]>
+      </doc>
+    </field>
+    <field name="RM_RESERVATION_SYSTEM_PLAN_FOLLOWER_TIME_STEP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The step size of the Reservation System.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_RESERVATION_SYSTEM_PLAN_FOLLOWER_TIME_STEP" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_RESERVATION_SYSTEM_MAX_PERIODICITY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The maximum periodicity for the Reservation System.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_RESERVATION_SYSTEM_MAX_PERIODICITY" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDULER_ENABLE_MONITORS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable periodic monitor threads.
+ @see #RM_SCHEDULER_MONITOR_POLICIES]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_SCHEDULER_ENABLE_MONITORS" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDULER_MONITOR_POLICIES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[List of SchedulingEditPolicy classes affecting the scheduler.]]>
+      </doc>
+    </field>
+    <field name="RM_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The address of the RM web application.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_WEBAPP_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_WEBAPP_HTTPS_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The https address of the RM web application.]]>
+      </doc>
+    </field>
+    <field name="YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_SSL_SERVER_RESOURCE_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_WEBAPP_HTTPS_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_WEBAPP_HTTPS_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_WEBAPP_UI2_ENABLE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable YARN WebApp V2.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_WEBAPP_UI2_ENABLE" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_WEBAPP_UI2_WARFILE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_API_SERVICES_ENABLE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_YARN_API_SYSTEM_SERVICES_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_RESOURCE_TRACKER_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_RESOURCE_TRACKER_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_RESOURCE_TRACKER_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_AM_EXPIRY_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The expiry interval for application master reporting.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_AM_EXPIRY_INTERVAL_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NM_EXPIRY_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[How long to wait until a node manager is considered dead.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_NM_EXPIRY_INTERVAL_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_ACL_ENABLE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Are acls enabled.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_ACL_ENABLE" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_RESERVATION_ACL_ENABLE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Are reservation acls enabled.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_RESERVATION_ACL_ENABLE" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_ADMIN_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[ACL of who can be admin of YARN cluster.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_ADMIN_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_YARN_APP_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[ACL used in case none is found. Allows nothing.]]>
+      </doc>
+    </field>
+    <field name="OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Setting that controls whether opportunistic container allocation
+  is enabled or not.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="OPP_CONTAINER_MAX_ALLOCATIONS_PER_AM_HEARTBEAT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Maximum number of opportunistic containers to be allocated in
+ AM heartbeat.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_OPP_CONTAINER_MAX_ALLOCATIONS_PER_AM_HEARTBEAT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="OPP_CONTAINER_ALLOCATION_NODES_NUMBER_USED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of nodes to be used by the Opportunistic Container allocator for
+ dispatching containers during container allocation.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_OPP_CONTAINER_ALLOCATION_NODES_NUMBER_USED" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_QUEUING_SORTING_NODES_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Frequency for computing least loaded NMs.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_QUEUING_SORTING_NODES_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_QUEUING_LOAD_COMPARATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Comparator for determining node load for scheduling of opportunistic
+ containers.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_QUEUING_LOAD_COMPARATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_QUEUING_LIMIT_STDEV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Value of standard deviation used for calculation of queue limit
+ thresholds.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_QUEUING_LIMIT_STDEV" type="float"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_QUEUING_MIN_QUEUE_LENGTH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Min length of container queue at NodeManager. This is a cluster-wide
+ configuration that acts as the lower-bound of optimal queue length
+ calculated by the NodeQueueLoadMonitor]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_QUEUING_MIN_QUEUE_LENGTH" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_QUEUING_MAX_QUEUE_LENGTH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Max length of container queue at NodeManager. This is a cluster-wide
+ configuration that acts as the upper-bound of optimal queue length
+ calculated by the NodeQueueLoadMonitor]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_QUEUING_MAX_QUEUE_LENGTH" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_QUEUING_MIN_QUEUE_WAIT_TIME_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Min queue wait time for a container at a NodeManager.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_QUEUING_MIN_QUEUE_WAIT_TIME_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_QUEUING_MAX_QUEUE_WAIT_TIME_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Max queue wait time for a container queue at a NodeManager.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_QUEUING_MAX_QUEUE_WAIT_TIME_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_INTERMEDIATE_DATA_ENCRYPTION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable/disable intermediate-data encryption at YARN level. For now, this
+ only is used by the FileSystemRMStateStore to setup right file-system
+ security attributes.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_INTERMEDIATE_DATA_ENCRYPTION" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ADMIN_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The address of the RM admin interface.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_ADMIN_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_ADMIN_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ADMIN_CLIENT_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of threads used to handle RM admin interface.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_ADMIN_CLIENT_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_AM_MAX_ATTEMPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The maximum number of application attempts for
+ an application, if unset by user.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_AM_MAX_ATTEMPTS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="GLOBAL_RM_AM_MAX_ATTEMPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The maximum number of application attempts.
+ It's a global setting for all application masters.]]>
+      </doc>
+    </field>
+    <field name="RM_KEYTAB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The keytab for the resource manager.]]>
+      </doc>
+    </field>
+    <field name="RM_WEBAPP_SPNEGO_USER_NAME_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The kerberos principal to be used for spnego filter for RM.]]>
+      </doc>
+    </field>
+    <field name="RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The kerberos keytab to be used for spnego filter for RM.]]>
+      </doc>
+    </field>
+    <field name="RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Flag to enable override of the default kerberos authentication filter with
+ the RM authentication filter to allow authentication using delegation
+ tokens(fallback to kerberos if the tokens are missing). Only applicable
+ when the http authentication type is kerberos.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_WEBAPP_DELEGATION_TOKEN_AUTH_FILTER" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_WEBAPP_ENABLE_CORS_FILTER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable cross origin (CORS) support.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_WEBAPP_ENABLE_CORS_FILTER" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[How long to wait until a container is considered dead.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NODES_INCLUDE_FILE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Path to file with nodes to include.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_NODES_INCLUDE_FILE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SUBMISSION_PREPROCESSOR_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable submission pre-processor.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_SUBMISSION_PREPROCESSOR_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SUBMISSION_PREPROCESSOR_FILE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Path to file with hosts for the submission processor to handle.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_SUBMISSION_PREPROCESSOR_FILE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SUBMISSION_PREPROCESSOR_REFRESH_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Submission processor refresh interval.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_SUBMISSION_PREPROCESSOR_REFRESH_INTERVAL_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NODES_EXCLUDE_FILE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Path to file with nodes to exclude.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_NODES_EXCLUDE_FILE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of threads to handle resource tracker calls.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_RESOURCE_TRACKER_CLIENT_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NM_REGISTRATION_IP_HOSTNAME_CHECK_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Check IP and hostname resolution during nodemanager registration.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_NM_REGISTRATION_IP_HOSTNAME_CHECK_KEY" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDULER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The class to use as the resource scheduler.]]>
+      </doc>
+    </field>
+    <field name="RM_PLACEMENT_CONSTRAINTS_HANDLER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Specify which handler will be used to process PlacementConstraints.
+ For details on PlacementConstraints, please refer to
+ {@link org.apache.hadoop.yarn.api.resource.PlacementConstraint}]]>
+      </doc>
+    </field>
+    <field name="DISABLED_RM_PLACEMENT_CONSTRAINTS_HANDLER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This handler rejects all allocate calls made by an application, if they
+ contain a {@link org.apache.hadoop.yarn.api.records.SchedulingRequest}.]]>
+      </doc>
+    </field>
+    <field name="PROCESSOR_RM_PLACEMENT_CONSTRAINTS_HANDLER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Using this handler, the placement of containers with constraints is
+ determined as a pre-processing step before the capacity or the fair
+ scheduler is called. Once the placement is decided, the capacity/fair
+ scheduler is invoked to perform the actual allocation. The advantage of
+ this approach is that it supports all constraint types (affinity,
+ anti-affinity, cardinality). Moreover, it considers multiple containers at
+ a time, which allows to satisfy more constraints than a container-at-a-time
+ approach can achieve. As it sits outside the main scheduler, it can be used
+ by both the capacity and fair schedulers. Note that at the moment it does
+ not account for task priorities within an application, given that such
+ priorities might be conflicting with the placement constraints.]]>
+      </doc>
+    </field>
+    <field name="SCHEDULER_RM_PLACEMENT_CONSTRAINTS_HANDLER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Using this handler, containers with constraints will be placed by the main
+ scheduler. If the configured RM scheduler
+ <pre>yarn.resourcemanager.scheduler.class</pre>
+ cannot handle placement constraints, the corresponding SchedulingRequests
+ will be rejected. As of now, only the capacity scheduler supports
+ SchedulingRequests. In particular, it currently supports anti-affinity
+ constraints (no affinity or cardinality) and places one container at a
+ time. The advantage of this handler compared to the placement-processor is
+ that it follows the same ordering rules for queues (sorted by utilization,
+ priority) and apps (sorted by FIFO/fairness/priority) as the ones followed
+ by the main scheduler.]]>
+      </doc>
+    </field>
+    <field name="RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Placement Algorithm.]]>
+      </doc>
+    </field>
+    <field name="RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Used for BasicPlacementAlgorithm - default SERIAL.]]>
+      </doc>
+    </field>
+    <field name="RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_SCHEDULER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NM_HEARTBEAT_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[RM set next Heartbeat interval for NM]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NM_HEARTBEAT_INTERVAL_SCALING_ENABLE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable Heartbeat Interval Scaling based on cpu utilization.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_NM_HEARTBEAT_INTERVAL_SCALING_ENABLE" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NM_HEARTBEAT_INTERVAL_MIN_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MIN_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NM_HEARTBEAT_INTERVAL_MAX_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MAX_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NM_HEARTBEAT_INTERVAL_SPEEDUP_FACTOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_NM_HEARTBEAT_INTERVAL_SPEEDUP_FACTOR" type="float"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NM_HEARTBEAT_INTERVAL_SLOWDOWN_FACTOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_NM_HEARTBEAT_INTERVAL_SLOWDOWN_FACTOR" type="float"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of worker threads that write the history data.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SYSTEM_METRICS_PUBLISHER_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The setting that controls whether yarn system metrics is published on the
+  timeline server or not by RM. This configuration setting is for ATS V1.
+  This is now deprecated in favor of SYSTEM_METRICS_PUBLISHER_ENABLED.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SYSTEM_METRICS_PUBLISHER_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The setting that controls whether yarn system metrics is published on the
+  timeline server or not by RM and NM. This configuration setting is for
+  ATS v2.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SYSTEM_METRICS_PUBLISHER_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_PUBLISH_CONTAINER_EVENTS_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The setting that controls whether yarn container events are published to
+ the timeline service or not by RM. This configuration setting is for ATS
+ V2]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_PUBLISH_CONTAINER_EVENTS_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_TIMELINE_SERVER_V1_PUBLISHER_DISPATCHER_BATCH_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_TIMELINE_SERVER_V1_PUBLISHER_DISPATCHER_BATCH_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_TIMELINE_SERVER_V1_PUBLISHER_INTERVAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_TIMELINE_SERVER_V1_PUBLISHER_INTERVAL" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_TIMELINE_SERVER_V1_PUBLISHER_BATCH_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_TIMELINE_SERVER_V1_PUBLISHER_BATCH_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DELEGATION_KEY_UPDATE_INTERVAL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DELEGATION_TOKEN_RENEW_INTERVAL_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DELEGATION_TOKEN_MAX_LIFETIME_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DELEGATION_TOKEN_MAX_CONF_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_DELEGATION_TOKEN_MAX_CONF_SIZE_BYTES" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DELEGATION_TOKEN_ALWAYS_CANCEL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_DELEGATION_TOKEN_ALWAYS_CANCEL" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DT_RENEWER_THREAD_TIMEOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_DT_RENEWER_THREAD_TIMEOUT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DT_RENEWER_THREAD_RETRY_INTERVAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_DT_RENEWER_THREAD_RETRY_INTERVAL" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DT_RENEWER_THREAD_RETRY_MAX_ATTEMPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_DT_RENEWER_THREAD_RETRY_MAX_ATTEMPTS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RECOVERY_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_RECOVERY_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_FAIL_FAST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_YARN_FAIL_FAST" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_FAIL_FAST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_WORK_PRESERVING_RECOVERY_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_WORK_PRESERVING_RECOVERY_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ZK_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Zookeeper interaction configs]]>
+      </doc>
+    </field>
+    <field name="RM_ZK_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ZK_NUM_RETRIES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ZK_RM_NUM_RETRIES" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ZK_ZNODE_SIZE_LIMIT_BYTES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Zookeeper znode limit]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_ZK_ZNODE_SIZE_LIMIT_BYTES" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ZK_RETRY_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_ZK_RETRY_INTERVAL_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ZK_TIMEOUT_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_ZK_TIMEOUT_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ZK_APPID_NODE_SPLIT_INDEX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ZK_APPID_NODE_SPLIT_INDEX" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ZK_DELEGATION_TOKEN_NODE_SPLIT_INDEX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Index at which the RM Delegation Token ids will be split so that the
+ delegation token znodes stored in the zookeeper RM state store will be
+ stored as two different znodes (parent-child).]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_ZK_DELEGATION_TOKEN_NODE_SPLIT_INDEX" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ZK_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_ZK_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ZK_AUTH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ZK_STATE_STORE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ZK_RM_STATE_STORE_PARENT_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Parent znode path under which ZKRMStateStore will create znodes]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_ZK_RM_STATE_STORE_PARENT_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ZK_RM_STATE_STORE_ROOT_NODE_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Root node ACLs for fencing]]>
+      </doc>
+    </field>
+    <field name="RM_HA_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[HA related configs]]>
+      </doc>
+    </field>
+    <field name="RM_HA_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_HA_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_HA_IDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_HA_ID" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_BASED_RM_CONF_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Store the related configuration files in File System]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_FS_BASED_RM_CONF_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_CONFIGURATION_PROVIDER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCHEDULER_CONFIGURATION_STORE_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILE_CONFIGURATION_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MEMORY_CONFIGURATION_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_CONFIGURATION_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ZK_CONFIGURATION_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LEVELDB_CONFIGURATION_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_CONFIGURATION_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDCONF_STORE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDCONF_LEVELDB_COMPACTION_INTERVAL_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_SCHEDCONF_LEVELDB_COMPACTION_INTERVAL_SECS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDCONF_MAX_LOGS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_SCHEDCONF_LEVELDB_MAX_LOGS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_SCHEDCONF_ZK_MAX_LOGS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCHEDULER_CONFIGURATION_FS_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCHEDULER_CONFIGURATION_FS_MAX_VERSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_SCHEDULER_CONFIGURATION_FS_MAX_VERSION" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDCONF_STORE_ZK_PARENT_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Parent znode path under which ZKConfigurationStore will create znodes.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_SCHEDCONF_STORE_ZK_PARENT_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_SCHEDULER_MUTATION_ACL_POLICY_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_AUTHORIZATION_PROVIDER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="AUTO_FAILOVER_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="AUTO_FAILOVER_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_AUTO_FAILOVER_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="AUTO_FAILOVER_EMBEDDED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="This property should never be set to {@code false}.">
+      <doc>
+      <![CDATA[This property controls whether {@link ActiveStandbyElector} leader
+ election should be used when {@link #CURATOR_LEADER_ELECTOR} is
+ {@code false}.
+
+ @deprecated This property should never be set to {@code false}.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_AUTO_FAILOVER_EMBEDDED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="The {@link #AUTO_FAILOVER_EMBEDDED} property is deprecated.">
+      <doc>
+      <![CDATA[The default value for {@link #AUTO_FAILOVER_EMBEDDED}.
+
+ @deprecated The {@link #AUTO_FAILOVER_EMBEDDED} property is deprecated.]]>
+      </doc>
+    </field>
+    <field name="AUTO_FAILOVER_ZK_BASE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_AUTO_FAILOVER_ZK_BASE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CLIENT_FAILOVER_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CLIENT_FAILOVER_PROXY_PROVIDER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_CLIENT_FAILOVER_PROXY_PROVIDER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CLIENT_FAILOVER_NO_HA_PROXY_PROVIDER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_CLIENT_FAILOVER_NO_HA_PROXY_PROVIDER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CLIENT_FAILOVER_MAX_ATTEMPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CLIENT_FAILOVER_SLEEPTIME_BASE_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CLIENT_FAILOVER_SLEEPTIME_MAX_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CLIENT_FAILOVER_RETRIES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_CLIENT_FAILOVER_RETRIES" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_CLIENT_FAILOVER_RETRIES_ON_SOCKET_TIMEOUTS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_HA_FC_ELECTOR_ZK_RETRIES_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[number of zookeeper operation retry times in ActiveStandbyElector]]>
+      </doc>
+    </field>
+    <field name="CURATOR_LEADER_ELECTOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="Eventually, we want to default to the curator-based
+ implementation and remove the {@link ActiveStandbyElector} based
+ implementation. We should remove this config then.">
+      <doc>
+      <![CDATA[Whether to use the Curator-based elector for leader election.
+
+ @deprecated Eventually, we want to default to the curator-based
+ implementation and remove the {@link ActiveStandbyElector} based
+ implementation. We should remove this config then.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_CURATOR_LEADER_ELECTOR_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The class to use as the persistent store.]]>
+      </doc>
+    </field>
+    <field name="FS_RM_STATE_STORE_URI" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[URI for FileSystemRMStateStore]]>
+      </doc>
+    </field>
+    <field name="FS_RM_STATE_STORE_NUM_RETRIES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FS_RM_STATE_STORE_NUM_RETRIES" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_RM_STATE_STORE_RETRY_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FS_RM_STATE_STORE_RETRY_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_LEVELDB_STORE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_LEVELDB_COMPACTION_INTERVAL_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The time in seconds between full compactions of the leveldb database.
+  Setting the interval to zero disables the full compaction cycles.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_LEVELDB_COMPACTION_INTERVAL_SECS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_MAX_COMPLETED_APPLICATIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The maximum number of completed applications RM keeps. By default equals
+ to {@link #DEFAULT_RM_MAX_COMPLETED_APPLICATIONS}.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_MAX_COMPLETED_APPLICATIONS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The maximum number of completed applications RM state store keeps. By
+ default equals to value of {@link #RM_MAX_COMPLETED_APPLICATIONS}.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_STATE_STORE_MAX_COMPLETED_APPLICATIONS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="This default value is ignored and will be removed in a future
+ release. The default value of
+ {@code yarn.resourcemanager.state-store.max-completed-applications} is the
+ value of {@link #RM_MAX_COMPLETED_APPLICATIONS}.">
+      <doc>
+      <![CDATA[The default value for
+ {@code yarn.resourcemanager.state-store.max-completed-applications}.
+ @deprecated This default value is ignored and will be removed in a future
+ release. The default value of
+ {@code yarn.resourcemanager.state-store.max-completed-applications} is the
+ value of {@link #RM_MAX_COMPLETED_APPLICATIONS}.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_APPLICATION_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default application name]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_APPLICATION_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default application type]]>
+      </doc>
+    </field>
+    <field name="APPLICATION_TYPE_LENGTH" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default application type length]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_QUEUE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default queue name]]>
+      </doc>
+    </field>
+    <field name="RM_METRICS_RUNTIME_BUCKETS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Buckets (in minutes) for the number of apps running in each queue.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_METRICS_RUNTIME_BUCKETS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default sizes of the runtime metric buckets in minutes.]]>
+      </doc>
+    </field>
+    <field name="RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_CONTAINER_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NMTOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_NMTOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NODEMANAGER_MINIMUM_VERSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_NODEMANAGER_MINIMUM_VERSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeout(msec) for an untracked node to remain in shutdown or decommissioned
+ state.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_PROXY_USER_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[RM proxy users' prefix]]>
+      </doc>
+    </field>
+    <field name="RM_RESOURCE_PROFILES_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable/disable resource profiles.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_RESOURCE_PROFILES_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_RESOURCE_PROFILES_SOURCE_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[File containing resource profiles.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_RESOURCE_PROFILES_SOURCE_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_CLIENT_LOAD_RESOURCETYPES_FROM_SERVER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable/disable loading resource-types.xml at client side.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_CLIENT_LOAD_RESOURCETYPES_FROM_SERVER" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NODE_GRACEFUL_DECOMMISSION_TIMEOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeout in seconds for YARN node graceful decommission.
+ This is the maximal time to wait for running containers and applications
+ to complete before transition a DECOMMISSIONING node into DECOMMISSIONED.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_NODE_GRACEFUL_DECOMMISSION_TIMEOUT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DECOMMISSIONING_NODES_WATCHER_POLL_INTERVAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Period in seconds of the poll timer task inside DecommissioningNodesWatcher
+ to identify and take care of DECOMMISSIONING nodes missing regular heart beat.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_DECOMMISSIONING_NODES_WATCHER_POLL_INTERVAL" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Prefix for all node manager configs.]]>
+      </doc>
+    </field>
+    <field name="NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Max Queue length of <code>OPPORTUNISTIC</code> containers on the NM.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DIST_SCHEDULING_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Setting that controls whether distributed scheduling is enabled or not.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_DIST_SCHEDULING_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_ADMIN_USER_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Environment variables that will be sent to containers.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_ADMIN_USER_ENV" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_ADMIN_FORCE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[PATH components that will be prepended to the user's path.
+ If this is defined and the user does not define PATH, NM will also
+ append ":$PATH" to prevent this from eclipsing the PATH defined in
+ the container. This feature is only available for Linux.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_ADMIN_FORCE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_ENV_WHITELIST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Environment variables that containers may override rather than use NodeManager's default.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_ENV_WHITELIST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[address of node manager IPC.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_BIND_HOST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The actual bind address for the NM.]]>
+      </doc>
+    </field>
+    <field name="NM_CONTAINER_EXECUTOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[who will execute(launch) the containers.]]>
+      </doc>
+    </field>
+    <field name="NM_CONTAINER_STATE_TRANSITION_LISTENERS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[List of container state transition listeners.]]>
+      </doc>
+    </field>
+    <field name="NM_CONTAINER_EXECUTOR_SCHED_PRIORITY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Adjustment to make to the container os scheduling priority.
+ The valid values for this could vary depending on the platform.
+ On Linux, higher values mean run the containers at a less 
+ favorable priority than the NM. 
+ The value specified is an int.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_MGR_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of threads container manager uses.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_MGR_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_COLLECTOR_SERVICE_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of threads container manager uses.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_COLLECTOR_SERVICE_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_DELETE_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of threads used in cleanup.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DELETE_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_EXECUTOR_EXIT_FILE_TIMEOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_EXECUTOR_EXIT_FILE_TIMEOUT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_KEYTAB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Keytab for NM.]]>
+      </doc>
+    </field>
+    <field name="NM_LOCAL_DIRS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[List of directories to store localized files in.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOCAL_DIRS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of files in each localized directories
+ Avoid tuning this too low.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOCALIZER_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Address where the localizer IPC is.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOCALIZER_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_LOCALIZER_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_COLLECTOR_SERVICE_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Address where the collector service IPC is.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_COLLECTOR_SERVICE_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_COLLECTOR_SERVICE_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_PUBLISH_CONTAINER_EVENTS_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The setting that controls whether yarn container events are published to
+ the timeline service or not by NM. This configuration setting is for ATS
+ V2]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_PUBLISH_CONTAINER_EVENTS_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Interval in between cache cleanups.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOCALIZER_CACHE_TARGET_SIZE_MB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Target size of localizer cache in MB, per nodemanager. It is a target
+ retention size that only includes resources with PUBLIC and PRIVATE
+ visibility and excludes resources with APPLICATION visibility]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOCALIZER_CACHE_TARGET_SIZE_MB" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOCALIZER_CLIENT_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of threads to handle localization requests.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOCALIZER_CLIENT_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOCALIZER_FETCH_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of threads to use for localization fetching.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOCALIZER_FETCH_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOG_DIRS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Where to store container logs.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOG_DIRS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOG_AGGREGATION_THREAD_POOL_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of threads to handle log aggregation in node manager.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOG_AGGREGATION_THREAD_POOL_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_DEFAULT_CONTAINER_EXECUTOR_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default permissions for container logs.]]>
+      </doc>
+    </field>
+    <field name="NM_DEFAULT_CONTAINER_EXECUTOR_LOG_DIRS_PERMISSIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_DEFAULT_CONTAINER_EXECUTOR_LOG_DIRS_PERMISSIONS_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_RESOURCEMANAGER_MINIMUM_VERSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_RESOURCEMANAGER_MINIMUM_VERSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DISK_VALIDATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Disk Validator.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_DISK_VALIDATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_DIAGNOSTICS_MAXIMUM_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Maximum size of a container's diagnostics to keep for the
+ container relaunch case.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_DIAGNOSTICS_MAXIMUM_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_RETRY_MINIMUM_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Minimum container restart interval.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_RETRY_MINIMUM_INTERVAL_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_QUEUING_USE_PAUSE_FOR_PREEMPTION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Use container pause as the preemption policy over kill in the container
+ queue at a NodeManager.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_QUEUING_USE_PAUSE_FOR_PREEMPTION" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Interval at which the delayed token removal thread runs]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Delegation Token renewer thread count]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_PROXY_USER_PRIVILEGES_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_PROXY_USER_PRIVILEGES_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The expiry interval for node IP caching. -1 disables the caching]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[How many diagnostics/failure messages can be saved in RM for
+ log aggregation. It also defines the number of diagnostics/failure
+ messages can be shown in log aggregation web ui.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LOG_AGGREGATION_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether to enable log aggregation]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_LOG_AGGREGATION_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LOG_AGGREGATION_FILE_FORMATS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LOG_AGGREGATION_FILE_CONTROLLER_FMT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LOG_AGGREGATION_REMOTE_APP_LOG_DIR_FMT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LOG_AGGREGATION_REMOTE_APP_LOG_DIR_SUFFIX_FMT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LOG_AGGREGATION_RETAIN_SECONDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[How long to wait before deleting aggregated logs, -1 disables.
+ Be careful: setting this too small will spam the name node.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LOG_AGGREGATION_DEBUG_FILESIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_LOG_AGGREGATION_DEBUG_FILESIZE" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[How long to wait between aggregated log retention checks. If set to
+ a value {@literal <=} 0 then the value is computed as one-tenth of the
+ log retention setting. Be careful: setting this too small will spam
+ the name node.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LOG_AGGREGATION_STATUS_TIME_OUT_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[How long for ResourceManager to wait for NodeManager to report its
+ log aggregation status. If waiting time of which the log aggregation status
+ is reported from NodeManager exceeds the configured value, RM will report
+ log aggregation status for this NodeManager as TIME_OUT.
+
+ This configuration will be used in NodeManager as well to decide
+ whether and when to delete the cached log aggregation status.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_LOG_AGGREGATION_STATUS_TIME_OUT_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOG_RETAIN_SECONDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of seconds to retain logs on the NodeManager. Only applicable if Log
+ aggregation is disabled]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOG_RETAIN_SECONDS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Define how often NMs wake up and upload log files]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MIN_LOG_ROLLING_INTERVAL_SECONDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The allowed hard minimum limit for {@link
+ YarnConfiguration#NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS}.]]>
+      </doc>
+    </field>
+    <field name="MIN_LOG_ROLLING_INTERVAL_SECONDS_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Define how many aggregated log files per application per NM we can have
+ in remote file system.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOG_AGGREGATION_NUM_LOG_FILES_SIZE_PER_APP" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOG_DELETION_THREADS_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of threads used in log cleanup. Only applicable if Log aggregation
+ is disabled]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOG_DELETE_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_REMOTE_APP_LOG_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Where to aggregate logs to.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_REMOTE_APP_LOG_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_REMOTE_APP_LOG_DIR_SUFFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The remote log dir will be created at below location.
+ NM_REMOTE_APP_LOG_DIR/${user}/bucket_{NM_REMOTE_APP_LOG_DIR_SUFFIX}
+ /${bucketDir}/${appId}]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_REMOTE_APP_LOG_DIR_INCLUDE_OLDER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Specifies whether Older Application Log Directory is included.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_REMOTE_APP_LOG_DIR_INCLUDE_OLDER" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_REMOTE_APP_LOG_DIR_GROUPNAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Specifies the group of the aggregated log directory.]]>
+      </doc>
+    </field>
+    <field name="YARN_LOG_SERVER_URL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_LOG_SERVER_WEBSERVICE_URL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_TRACKING_URL_GENERATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_PMEM_MB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Amount of memory in MB that can be allocated for containers.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_PMEM_MB" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_SYSTEM_RESERVED_PMEM_MB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Amount of memory in MB that has been reserved for non-yarn use.]]>
+      </doc>
+    </field>
+    <field name="NM_PMEM_CHECK_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Specifies whether physical memory check is enabled.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_PMEM_CHECK_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_VMEM_CHECK_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Specifies whether virtual memory check is enabled.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_VMEM_CHECK_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_VMEM_PMEM_RATIO" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Conversion ratio for physical memory to virtual memory.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_VMEM_PMEM_RATIO" type="float"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_ELASTIC_MEMORY_CONTROL_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Specifies whether to do memory check on overall usage.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_ELASTIC_MEMORY_CONTROL_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_ELASTIC_MEMORY_CONTROL_OOM_HANDLER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Specifies the OOM handler code.]]>
+      </doc>
+    </field>
+    <field name="NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The path to the OOM listener.]]>
+      </doc>
+    </field>
+    <field name="NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Maximum time in seconds to resolve an OOM situation.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_ELASTIC_MEMORY_CONTROL_OOM_TIMEOUT_SEC" type="java.lang.Integer"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_VCORES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of Virtual CPU Cores which can be allocated for containers.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_VCORES" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_COUNT_LOGICAL_PROCESSORS_AS_CORES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Count logical processors(like hyperthreads) as cores.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_COUNT_LOGICAL_PROCESSORS_AS_CORES" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_PCORES_VCORES_MULTIPLIER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Multiplier to convert physical cores to vcores.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_PCORES_VCORES_MULTIPLIER" type="float"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Percentage of overall CPU which can be allocated for containers.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_ENABLE_HARDWARE_CAPABILITY_DETECTION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable or disable node hardware capability detection.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_ENABLE_HARDWARE_CAPABILITY_DETECTION" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_MEMORY_RESOURCE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_MEMORY_RESOURCE_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_MEMORY_RESOURCE_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_MEMORY_RESOURCE_ENFORCED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_MEMORY_RESOURCE_ENFORCED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_MEMORY_RESOURCE_CGROUPS_SWAPPINESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_MEMORY_RESOURCE_CGROUPS_SWAPPINESS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_MEMORY_RESOURCE_CGROUPS_SOFT_LIMIT_PERCENTAGE" type="float"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CPU_RESOURCE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CPU_RESOURCE_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable cpu isolation.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CPU_RESOURCE_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_DISK_RESOURCE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Prefix for disk configurations. Work in progress: This configuration
+ parameter may be changed/removed in the future.]]>
+      </doc>
+    </field>
+    <field name="NM_DISK_RESOURCE_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This setting controls if resource handling for disk operations is enabled.
+ Work in progress: This configuration parameter may be changed/removed in
+ the future]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DISK_RESOURCE_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Disk as a resource is disabled by default.]]>
+      </doc>
+    </field>
+    <field name="NM_NETWORK_RESOURCE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NETWORK_RESOURCE_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This setting controls if resource handling for network bandwidth is
+ enabled. Work in progress: This configuration parameter may be
+ changed/removed in the future]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_NETWORK_RESOURCE_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Network as a resource is disabled by default.]]>
+      </doc>
+    </field>
+    <field name="NM_NETWORK_RESOURCE_INTERFACE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Specifies the interface to be used for applying network throttling rules.
+ Work in progress: This configuration parameter may be changed/removed in
+ the future]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_NETWORK_RESOURCE_INTERFACE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Specifies the total available outbound bandwidth on the node. Work in
+ progress: This configuration parameter may be changed/removed in the future]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Specifies the total outbound bandwidth available to YARN containers.
+ defaults to NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT if not specified.
+ Work in progress: This configuration parameter may be changed/removed in
+ the future]]>
+      </doc>
+    </field>
+    <field name="NM_RESOURCE_PLUGINS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Prefix for computation resources, example of computation resources like
+ GPU / FPGA / TPU, etc.]]>
+      </doc>
+    </field>
+    <field name="NM_RESOURCE_PLUGINS_FAIL_FAST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Specifies whether the initialization of the Node Manager should continue
+ if a certain device (GPU, FPGA, etc) was not found in the system. If set
+ to "true", then an exception will be thrown if a device is missing or
+ an error occurred during discovery.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RESOURCE_PLUGINS_FAIL_FAST" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_PLUGGABLE_DEVICE_FRAMEWORK_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This setting controls if pluggable device plugin framework is enabled.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_PLUGGABLE_DEVICE_FRAMEWORK_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The pluggable device plugin framework is disabled by default]]>
+      </doc>
+    </field>
+    <field name="NM_PLUGGABLE_DEVICE_FRAMEWORK_DEVICE_CLASSES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This setting contains vendor plugin class names for
+ device plugin framework to load. Split by comma]]>
+      </doc>
+    </field>
+    <field name="NM_GPU_RESOURCE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Prefix for gpu configurations. Work in progress: This configuration
+ parameter may be changed/removed in the future.]]>
+      </doc>
+    </field>
+    <field name="NM_GPU_ALLOWED_DEVICES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="AUTOMATICALLY_DISCOVER_GPU_DEVICES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_GPU_PATH_TO_EXEC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This setting controls where to how to invoke GPU binaries]]>
+      </doc>
+    </field>
+    <field name="NM_GPU_DOCKER_PLUGIN_IMPL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Settings to control which implementation of docker plugin for GPU will be
+ used.
+
+ By default uses NVIDIA docker v1.]]>
+      </doc>
+    </field>
+    <field name="NVIDIA_DOCKER_V1" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NVIDIA_DOCKER_V2" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_GPU_DOCKER_PLUGIN_IMPL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NVIDIA_DOCKER_PLUGIN_V1_ENDPOINT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This setting controls end point of nvidia-docker-v1 plugin.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NVIDIA_DOCKER_PLUGIN_V1_ENDPOINT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_FPGA_RESOURCE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Prefix for FPGA configurations. Work in progress: This configuration
+ parameter may be changed/removed in the future.]]>
+      </doc>
+    </field>
+    <field name="NM_FPGA_ALLOWED_DEVICES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_FPGA_PATH_TO_EXEC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_FPGA_VENDOR_PLUGIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_FPGA_VENDOR_PLUGIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_FPGA_DEVICE_DISCOVERY_SCRIPT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_FPGA_AVAILABLE_DEVICES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NETWORK_TAG_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NETWORK_TAG_HANDLER_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_NETWORK_TAG_HANDLER_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NETWORK_TAG_MAPPING_MANAGER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NETWORK_TAG_MAPPING_FILE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_NETWORK_RESOURCE_TAG_MAPPING_FILE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[NM Webapp address.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_WEBAPP_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_WEBAPP_HTTPS_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[NM Webapp https address.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_WEBAPP_HTTPS_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_WEBAPP_HTTPS_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_WEBAPP_ENABLE_CORS_FILTER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable/disable CORS filter.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_WEBAPP_ENABLE_CORS_FILTER" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_RESOURCE_MON_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[How often to monitor resource in a node.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RESOURCE_MON_INTERVAL_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_MONITOR_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_MONITOR_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_MON_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[How often to monitor containers.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_MON_INTERVAL_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_MON_RESOURCE_CALCULATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Class that calculates current resource utilization.]]>
+      </doc>
+    </field>
+    <field name="NM_CONTAINER_MON_RESOURCE_CALCULATOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Class that calculates containers current resource utilization.]]>
+      </doc>
+    </field>
+    <field name="NM_CONTAINER_MON_PROCESS_TREE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Class that calculates process tree resource utilization.]]>
+      </doc>
+    </field>
+    <field name="PROCFS_USE_SMAPS_BASED_RSS_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_PROCFS_USE_SMAPS_BASED_RSS_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="APPLICATION_TAG_BASED_PLACEMENT_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_APPLICATION_TAG_BASED_PLACEMENT_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="APPLICATION_TAG_BASED_PLACEMENT_USER_WHITELIST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_LOG_MONITOR_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable switch for container log monitoring.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_LOG_MONITOR_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_LOG_MON_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[How often to monitor logs generated by containers.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_LOG_MON_INTERVAL_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_LOG_DIR_SIZE_LIMIT_BYTES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The disk space limit for a single container log directory.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_LOG_DIR_SIZE_LIMIT_BYTES" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_LOG_TOTAL_SIZE_LIMIT_BYTES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The disk space limit for all of a container's logs.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_LOG_TOTAL_SIZE_LIMIT_BYTES" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_METRICS_ENABLE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable/disable container metrics.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_METRICS_ENABLE" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_METRICS_PERIOD_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Container metrics flush period. -1 for flush on completion.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_METRICS_PERIOD_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_METRICS_UNREGISTER_DELAY_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The delay time ms to unregister container metrics after completion.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_METRICS_UNREGISTER_DELAY_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_DISK_HEALTH_CHECK_ENABLE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable/Disable disks' health checker. Default is true. An expert level
+ configuration property.]]>
+      </doc>
+    </field>
+    <field name="NM_DISK_HEALTH_CHECK_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Frequency of running disks' health checker.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DISK_HEALTH_CHECK_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[By default, disks' health is checked every 2 minutes.]]>
+      </doc>
+    </field>
+    <field name="NM_MIN_HEALTHY_DISKS_FRACTION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The minimum fraction of number of disks to be healthy for the nodemanager
+ to launch new containers. This applies to nm-local-dirs and nm-log-dirs.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION" type="float"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[By default, at least 25% of disks are to be healthy to say that the node is
+ healthy in terms of disks.]]>
+      </doc>
+    </field>
+    <field name="NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The maximum percentage of disk space that can be used after which a disk is
+ marked as offline. Values can range from 0.0 to 100.0. If the value is
+ greater than or equal to 100, NM will check for full disk. This applies to
+ nm-local-dirs and nm-log-dirs.
+
+ This applies when disk-utilization-threshold.enabled is true.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE" type="float"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[By default, 90% of the disk can be used before it is marked as offline.]]>
+      </doc>
+    </field>
+    <field name="NM_DISK_UTILIZATION_THRESHOLD_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable/Disable the disk utilisation percentage
+ threshold for disk health checker.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DISK_UTILIZATION_THRESHOLD_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_WM_LOW_PER_DISK_UTILIZATION_PERCENTAGE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The low threshold percentage of disk space used when an offline disk is
+ marked as online. Values can range from 0.0 to 100.0. The value shouldn't
+ be more than NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE. If its value is
+ more than NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE or not set, it will be
+ set to the same value as NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE.
+ This applies to nm-local-dirs and nm-log-dirs.]]>
+      </doc>
+    </field>
+    <field name="NM_MIN_PER_DISK_FREE_SPACE_MB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The minimum space that must be available on a local dir for it to be used.
+ This applies to nm-local-dirs and nm-log-dirs.
+
+ This applies when disk-free-space-threshold.enabled is true.]]>
+      </doc>
+    </field>
+    <field name="NM_DISK_FREE_SPACE_THRESHOLD_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enable/Disable the minimum disk free
+ space threshold for disk health checker.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DISK_FREE_SPACE_THRESHOLD_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_WM_HIGH_PER_DISK_FREE_SPACE_MB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The minimum space that must be available on an offline
+ disk for it to be marked as online.  The value should not be less
+ than NM_MIN_PER_DISK_FREE_SPACE_MB.  If its value is less than
+ NM_MIN_PER_DISK_FREE_SPACE_MB or is not set, it will be set to the
+ same value as NM_MIN_PER_DISK_FREE_SPACE_MB.
+ This applies to nm-local-dirs and nm-log-dirs.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_MIN_PER_DISK_FREE_SPACE_MB" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[By default, all of the disk can be used before it is marked as offline.]]>
+      </doc>
+    </field>
+    <field name="NM_HEALTH_CHECK_SCRIPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The health checker scripts.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_HEALTH_CHECK_SCRIPTS" type="java.lang.String[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_HEALTH_CHECK_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Frequency of running node health script.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_HEALTH_CHECK_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_HEALTH_CHECK_RUN_BEFORE_STARTUP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether or not to run the node health script before the NM
+  starts up.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_HEALTH_CHECK_RUN_BEFORE_STARTUP" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_HEALTH_CHECK_TIMEOUT_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Health check time out period for all scripts.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_HEALTH_CHECK_TIMEOUT_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_HEALTH_CHECK_SCRIPT_TIMEOUT_MS_TEMPLATE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Health check script time out period.]]>
+      </doc>
+    </field>
+    <field name="NM_HEALTH_CHECK_SCRIPT_PATH_TEMPLATE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The health check script to run.]]>
+      </doc>
+    </field>
+    <field name="NM_HEALTH_CHECK_SCRIPT_OPTS_TEMPLATE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The arguments to pass to the health check script.]]>
+      </doc>
+    </field>
+    <field name="NM_HEALTH_CHECK_SCRIPT_INTERVAL_MS_TEMPLATE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Frequency of running node health script.]]>
+      </doc>
+    </field>
+    <field name="NM_CONTAINER_LOCALIZER_JAVA_OPTS_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The JVM options used on forking ContainerLocalizer process
+      by container executor.]]>
+      </doc>
+    </field>
+    <field name="NM_CONTAINER_LOCALIZER_JAVA_OPTS_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_LOCALIZER_LOG_LEVEL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The log level of container localizer process.]]>
+      </doc>
+    </field>
+    <field name="NM_CONTAINER_LOCALIZER_LOG_LEVEL_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="LINUX_CONTAINER_RUNTIME_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Prefix for runtime configuration constants.]]>
+      </doc>
+    </field>
+    <field name="LINUX_CONTAINER_RUNTIME_ALLOWED_RUNTIMES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Comma separated list of runtimes that are allowed when using
+ LinuxContainerExecutor. The standard values are:
+ <ul>
+   <li>default</li>
+   <li>docker</li>
+   <li>javasandbox</li>
+   <li>runc</li>
+ </ul>]]>
+      </doc>
+    </field>
+    <field name="LINUX_CONTAINER_RUNTIME_CLASS_FMT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_LINUX_CONTAINER_RUNTIME_ALLOWED_RUNTIMES" type="java.lang.String[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default list of allowed runtimes when using LinuxContainerExecutor.]]>
+      </doc>
+    </field>
+    <field name="LINUX_CONTAINER_RUNTIME_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default runtime to be used.]]>
+      </doc>
+    </field>
+    <field name="RUNC_CONTAINER_RUNTIME_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The runc image tag to manifest plugin class that should be used.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RUNC_IMAGE_TAG_TO_MANIFEST_PLUGIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default runc image tag to manifest plugin class.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_MANIFEST_TO_RESOURCES_PLUGIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The runc manifest to resources plugin class that should be used.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RUNC_MANIFEST_TO_RESOURCES_PLUGIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default runc manifest to resources plugin plugin class.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_IMAGE_TOPLEVEL_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The HDFS location under which the oci image manifests, layers,
+ and configs directories exist.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RUNC_IMAGE_TOPLEVEL_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default HDFS location under which the oci image manifests, layers,
+ and configs directories exist.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_LAYER_MOUNTS_TO_KEEP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Target count of layer mounts that we should keep on disk at one time.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RUNC_LAYER_MOUNTS_TO_KEEP" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_REAP_RUNC_LAYER_MOUNTS_INTERVAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The interval in seconds between executions of reaping layer mounts.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_REAP_RUNC_LAYER_MOUNTS_INTERVAL" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_RUNC_IMAGE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default runc image to be used.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_ALLOW_PRIVILEGED_CONTAINERS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Allow privileged containers. Use with extreme care.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RUNC_ALLOW_PRIVILEGED_CONTAINERS" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Privileged containers are disabled by default.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_ALLOWED_CONTAINER_NETWORKS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The set of networks allowed when launching containers using the
+ RuncContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RUNC_ALLOWED_CONTAINER_NETWORKS" type="java.lang.String[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default set of networks allowed when launching containers using the
+ RuncContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_ALLOWED_CONTAINER_RUNTIMES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The set of runtimes allowed when launching containers using the
+ RuncContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RUNC_ALLOWED_CONTAINER_RUNTIMES" type="java.lang.String[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default set of runtimes allowed when launching containers using the
+ RuncContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_PRIVILEGED_CONTAINERS_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[ACL list for users allowed to run privileged containers.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RUNC_PRIVILEGED_CONTAINERS_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default list for users allowed to run privileged containers is empty.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_ALLOW_HOST_PID_NAMESPACE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Allow host pid namespace for containers. Use with care.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RUNC_ALLOW_HOST_PID_NAMESPACE" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Host pid namespace for containers is disabled by default.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_DEFAULT_RO_MOUNTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default list of read-only mounts to be bind-mounted into all
+  runC containers that use RuncContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_DEFAULT_RW_MOUNTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default list of read-write mounts to be bind-mounted into all
+  runC containers that use RuncContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_SECCOMP_PROFILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Path to the seccomp profile to use with Runc containers.]]>
+      </doc>
+    </field>
+    <field name="NM_HDFS_RUNC_IMAGE_TAG_TO_HASH_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The HDFS location where the runC image tag to hash file exists.]]>
+      </doc>
+    </field>
+    <field name="NM_LOCAL_RUNC_IMAGE_TAG_TO_HASH_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The local file system location where the runC image tag to hash file exists.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_CACHE_REFRESH_INTERVAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The interval in seconds between refreshing the hdfs image tag to
+ hash cache.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RUNC_CACHE_REFRESH_INTERVAL" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default interval in seconds between refreshing the hdfs image tag to
+ hash cache.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_NUM_MANIFESTS_TO_CACHE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of manifests to cache in the image tag to hash cache.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NUM_MANIFESTS_TO_CACHE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default number of manifests to cache in the image tag to hash cache.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_STAT_CACHE_TIMEOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The timeout value in seconds for the values in the stat cache.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RUNC_STAT_CACHE_TIMEOUT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default timeout value in seconds for the values in the stat cache.]]>
+      </doc>
+    </field>
+    <field name="NM_RUNC_STAT_CACHE_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The size of the stat cache which stores stats of the layers and config.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RUNC_STAT_CACHE_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default size of the stat cache which stores stats of the
+ layers and config.]]>
+      </doc>
+    </field>
+    <field name="DOCKER_CONTAINER_RUNTIME_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_DOCKER_IMAGE_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default docker image to be used.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_IMAGE_UPDATE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default option to decide whether to pull the latest image or not.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_CONTAINER_CAPABILITIES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Capabilities allowed (and added by default) for docker containers.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DOCKER_CONTAINER_CAPABILITIES" type="java.lang.String[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[These are the default capabilities added by docker. We'll use the same
+ set here. While these may not be case-sensitive from a docker
+ perspective, it is best to keep these uppercase.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Allow privileged containers. Use with extreme care.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DOCKER_ALLOW_PRIVILEGED_CONTAINERS" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Privileged containers are disabled by default.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_ENABLE_USER_REMAPPING" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[enable user remapping.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DOCKER_ENABLE_USER_REMAPPING" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Set enable user remapping as false by default.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_USER_REMAPPING_UID_THRESHOLD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[lower limit for acceptable uids of user remapped user.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DOCKER_USER_REMAPPING_UID_THRESHOLD" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Set user remapping lower uid limit to 1 by default.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_USER_REMAPPING_GID_THRESHOLD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[lower limit for acceptable gids of user remapped user.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DOCKER_USER_REMAPPING_GID_THRESHOLD" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Set user remapping lower gid limit to 1 by default.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_PRIVILEGED_CONTAINERS_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[ACL list for users allowed to run privileged containers.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DOCKER_PRIVILEGED_CONTAINERS_ACL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default list for users allowed to run privileged containers is empty.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_ALLOWED_CONTAINER_NETWORKS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The set of networks allowed when launching containers using the
+ DockerContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DOCKER_ALLOWED_CONTAINER_NETWORKS" type="java.lang.String[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The set of networks allowed when launching containers using the
+ DockerContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_DEFAULT_CONTAINER_NETWORK" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The network used when launching containers using the
+ DockerContainerRuntime when no network is specified in the request. This
+  network must be one of the (configurable) set of allowed container
+  networks.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DOCKER_DEFAULT_CONTAINER_NETWORK" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The network used when launching containers using the
+ DockerContainerRuntime when no network is specified in the request and
+ no default network is configured.
+ .]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_ALLOWED_CONTAINER_RUNTIMES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The set of runtimes allowed when launching containers using the
+ DockerContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DOCKER_ALLOWED_CONTAINER_RUNTIMES" type="java.lang.String[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The set of runtimes allowed when launching containers using the
+ DockerContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_ALLOW_HOST_PID_NAMESPACE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Allow host pid namespace for containers. Use with care.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DOCKER_ALLOW_HOST_PID_NAMESPACE" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Host pid namespace for containers is disabled by default.]]>
+      </doc>
+    </field>
+    <field name="YARN_HTTP_WEBAPP_EXTERNAL_CLASSES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="HADOOP_HTTP_WEBAPP_SCHEDULER_PAGE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="This field is deprecated for
+ {@link #YARN_HTTP_WEBAPP_SCHEDULER_PAGE}">
+      <doc>
+      <![CDATA[@deprecated This field is deprecated for
+ {@link #YARN_HTTP_WEBAPP_SCHEDULER_PAGE}]]>
+      </doc>
+    </field>
+    <field name="YARN_HTTP_WEBAPP_SCHEDULER_PAGE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_HTTP_WEBAPP_CUSTOM_DAO_CLASSES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_HTTP_WEBAPP_CUSTOM_UNWRAPPED_DAO_CLASSES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_DOCKER_ALLOW_DELAYED_REMOVAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether or not users are allowed to request that Docker containers honor
+ the debug deletion delay. This is useful for troubleshooting Docker
+ container related launch failures.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DOCKER_ALLOW_DELAYED_REMOVAL" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default value on whether or not a user can request that Docker
+ containers honor the debug deletion delay.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_STOP_GRACE_PERIOD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="use {@link YarnConfiguration#NM_SLEEP_DELAY_BEFORE_SIGKILL_MS}">
+      <doc>
+      <![CDATA[A configurable value to pass to the Docker Stop command. This value
+ defines the number of seconds between the docker stop command sending
+ a SIGTERM and a SIGKILL.
+
+ @deprecated use {@link YarnConfiguration#NM_SLEEP_DELAY_BEFORE_SIGKILL_MS}]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_DOCKER_STOP_GRACE_PERIOD" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default value for the grace period between the SIGTERM and the
+ SIGKILL in the Docker Stop command.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_DEFAULT_RO_MOUNTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default list of read-only mounts to be bind-mounted into all
+  Docker containers that use DockerContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_DEFAULT_RW_MOUNTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default list of read-write mounts to be bind-mounted into all
+  Docker containers that use DockerContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="NM_DOCKER_DEFAULT_TMPFS_MOUNTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default list of tmpfs mounts to be mounted into all
+  Docker containers that use DockerContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="YARN_CONTAINER_SANDBOX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The mode in which the Java Container Sandbox should run detailed by
+  the JavaSandboxLinuxContainerRuntime.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_CONTAINER_SANDBOX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Permissions for application local directories.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_CONTAINER_SANDBOX_FILE_PERMISSIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_CONTAINER_SANDBOX_POLICY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Location for non-default java policy file.]]>
+      </doc>
+    </field>
+    <field name="YARN_CONTAINER_SANDBOX_POLICY_GROUP_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Prefix for group to policy file mapping.]]>
+      </doc>
+    </field>
+    <field name="YARN_CONTAINER_SANDBOX_WHITELIST_GROUP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The group which will run by default without the java security manager.]]>
+      </doc>
+    </field>
+    <field name="NM_LINUX_CONTAINER_EXECUTOR_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The path to the Linux container executor.]]>
+      </doc>
+    </field>
+    <field name="NM_NONSECURE_MODE_LIMIT_USERS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[True if linux-container-executor should limit itself to one user
+ when running in non-secure mode.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_NONSECURE_MODE_LIMIT_USERS" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NONSECURE_MODE_LOCAL_USER_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The UNIX user that containers will run as when Linux-container-executor
+ is used in nonsecure mode (a use case for this is using cgroups).]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_NONSECURE_MODE_LOCAL_USER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NONSECURE_MODE_USER_PATTERN_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The allowed pattern for UNIX user names enforced by 
+ Linux-container-executor when used in nonsecure mode (use case for this 
+ is using cgroups). The default value is taken from /usr/sbin/adduser]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_NONSECURE_MODE_USER_PATTERN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LINUX_CONTAINER_RESOURCES_HANDLER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The type of resource enforcement to use with the
+  linux container executor.]]>
+      </doc>
+    </field>
+    <field name="NM_LINUX_CONTAINER_CGROUPS_HIERARCHY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The path the linux container executor should use for cgroups]]>
+      </doc>
+    </field>
+    <field name="NM_LINUX_CONTAINER_CGROUPS_MOUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether the linux container executor should mount cgroups if not found]]>
+      </doc>
+    </field>
+    <field name="NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Where the linux container executor should mount cgroups if not found]]>
+      </doc>
+    </field>
+    <field name="NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether the apps should run in strict resource usage mode(not allowed to
+ use spare CPU)]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_APPLICATION_MONITOR_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_APPLICATION_HTTPS_POLICY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Specifies what the RM does regarding HTTPS enforcement for communication
+ with AM Web Servers, as well as generating and providing certificates.
+ Possible values are:
+ <ul>
+   <li>NONE - the RM will do nothing special.</li>
+   <li>LENIENT - the RM will generate and provide a keystore and truststore
+   to the AM, which it is free to use for HTTPS in its tracking URL web
+   server.  The RM proxy will still allow HTTP connections to AMs that opt
+   not to use HTTPS.</li>
+   <li>STRICT - this is the same as LENIENT, except that the RM proxy will
+   only allow HTTPS connections to AMs; HTTP connections will be blocked
+   and result in a warning page to the user.</li>
+ </ul>]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_APPLICATION_HTTPS_POLICY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_PROXY_TIMEOUT_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFALUT_RM_PROXY_TIMEOUT_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_PROXY_CONNECTION_TIMEOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_PROXY_CONNECTION_TIMEOUT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Interval of time the linux container executor should try cleaning up
+ cgroups entry when cleaning up a container. This is required due to what
+ seems to be a race condition, because the SIGTERM/SIGKILL is asynchronous.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Delay between attempts to remove linux cgroup.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_DELAY" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Indicates if memory and CPU limits will be set for the Windows Job
+ Object for the containers launched by the default container executor.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_WINDOWS_SECURE_CONTAINER_GROUP" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The Windows group that the windows-secure-container-executor should run as.]]>
+      </doc>
+    </field>
+    <field name="NM_LOG_AGG_COMPRESSION_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[T-file compression types used to compress aggregated logs.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_LOG_AGG_COMPRESSION_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_PRINCIPAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The kerberos principal for the node manager.]]>
+      </doc>
+    </field>
+    <field name="NM_AUX_SERVICES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_AUX_SERVICES_MANIFEST_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Boolean indicating whether loading aux services from a manifest is
+ enabled. If enabled, aux services may be dynamically modified through
+ reloading the manifest via filesystem changes or a REST API. When
+ enabled, aux services configuration properties unrelated to the manifest
+ will be ignored.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_AUX_SERVICES_MANIFEST_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_AUX_SERVICES_MANIFEST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[File containing auxiliary service specifications.]]>
+      </doc>
+    </field>
+    <field name="NM_AUX_SERVICES_MANIFEST_RELOAD_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Interval at which manifest file will be reloaded when modifications are
+ found (0 or less means that the file will not be checked for modifications
+ and reloaded).]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_AUX_SERVICES_MANIFEST_RELOAD_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_AUX_SERVICE_FMT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_AUX_SERVICES_CLASSPATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_AUX_SERVICE_REMOTE_CLASSPATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_AUX_SERVICES_SYSTEM_CLASSES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_USER_HOME_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_STDERR_PATTERN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_STDERR_PATTERN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINER_STDERR_BYTES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_CONTAINER_STDERR_BYTES" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_WEBAPP_SPNEGO_USER_NAME_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The kerberos principal to be used for spnego filter for NM.]]>
+      </doc>
+    </field>
+    <field name="NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The kerberos keytab to be used for spnego filter for NM.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_USER_HOME_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_RECOVERY_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_RECOVERY_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_RECOVERY_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_RECOVERY_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_RECOVERY_COMPACTION_INTERVAL_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The time in seconds between full compactions of the NM state database.
+  Setting the interval to zero disables the full compaction cycles.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_RECOVERY_COMPACTION_INTERVAL_SECS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_RECOVERY_SUPERVISED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_RECOVERY_SUPERVISED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOG_AGG_POLICY_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_LOG_AGG_POLICY_CLASS_PARAMETERS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="PROXY_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="PROXY_PRINCIPAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The kerberos principal for the proxy.]]>
+      </doc>
+    </field>
+    <field name="PROXY_KEYTAB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Keytab for Proxy.]]>
+      </doc>
+    </field>
+    <field name="PROXY_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The address for the web proxy.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_PROXY_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_PROXY_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="PROXY_BIND_HOST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Binding address for the web proxy.]]>
+      </doc>
+    </field>
+    <field name="YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCETRACKER_PROTOCOL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[YARN Service Level Authorization]]>
+      </doc>
+    </field>
+    <field name="YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONCLIENT_PROTOCOL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCEMANAGER_ADMINISTRATION_PROTOCOL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_PROTOCOL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_SECURITY_SERVICE_AUTHORIZATION_DISTRIBUTEDSCHEDULING_PROTOCOL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_SECURITY_SERVICE_AUTHORIZATION_CONTAINER_MANAGEMENT_PROTOCOL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_SECURITY_SERVICE_AUTHORIZATION_RESOURCE_LOCALIZER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONHISTORY_PROTOCOL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_SECURITY_SERVICE_AUTHORIZATION_COLLECTOR_NODEMANAGER_PROTOCOL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_SECURITY_SERVICE_AUTHORIZATION_APPLICATIONMASTER_NODEMANAGER_PROTOCOL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_SLEEP_DELAY_BEFORE_SIGKILL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[No. of milliseconds to wait between sending a SIGTERM and SIGKILL
+ to a running container]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_SLEEP_DELAY_BEFORE_SIGKILL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_PROCESS_KILL_WAIT_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Max time to wait for a process to come up when trying to cleanup
+ container resources]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_PROCESS_KILL_WAIT_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RESOURCEMANAGER_CONNECT_MAX_WAIT_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Max time to wait to establish a connection to RM]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Time interval between each attempt to connect to RM]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DISPATCHER_DRAIN_EVENTS_TIMEOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_DISPATCHER_DRAIN_EVENTS_TIMEOUT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_DISPATCHER_PRINT_EVENTS_INFO_THRESHOLD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The threshold used to trigger the logging of event types and counts
+  in RM's main event dispatcher. Default value is 5000,
+  which means RM will print events info when the queue size cumulatively
+  reaches 5000 every time. Such info can be used to reveal what
+  kind of events that RM is stuck at processing mostly,
+  it can help to narrow down certain performance issues.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_DISPATCHER_PRINT_EVENTS_INFO_THRESHOLD" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_DISPATCHER_CPU_MONITOR_SAMPLES_PER_MIN" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Resource manager dispatcher thread monitor sampling rate.
+ Units are samples per minute.  This controls how often to sample
+ the cpu utilization of the resource manager dispatcher thread.
+ The cpu utilization is displayed on the RM UI as scheduler busy %.
+ Set to zero to disable the dispatcher thread monitor.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_DISPATCHER_CPU_MONITOR_SAMPLES_PER_MIN" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_APPLICATION_CLASSPATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[CLASSPATH for YARN applications. A comma-separated list of CLASSPATH
+ entries]]>
+      </doc>
+    </field>
+    <field name="AMRM_PROXY_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The setting that controls whether AMRMProxy is enabled or not.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_AMRM_PROXY_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="AMRM_PROXY_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_AMRM_PROXY_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_AMRM_PROXY_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="AMRM_PROXY_CLIENT_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_AMRM_PROXY_CLIENT_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="AMRM_PROXY_HA_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_AMRM_PROXY_HA_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH" type="java.lang.String[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default platform-agnostic CLASSPATH for YARN applications. A
+ comma-separated list of CLASSPATH entries. The parameter expansion marker
+ will be replaced with real parameter expansion marker ('%' for Windows and
+ '$' for Linux) by NodeManager on container launch. For example: {{VAR}}
+ will be replaced as $VAR on Linux, and %VAR% on Windows.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_APPLICATION_CLASSPATH" type="java.lang.String[]"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[<p>
+ Default platform-specific CLASSPATH for YARN applications. A
+ comma-separated list of CLASSPATH entries constructed based on the client
+ OS environment expansion syntax.
+ </p>
+ <p>
+ Note: Use {@link #DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH} for
+ cross-platform practice i.e. submit an application from a Windows client to
+ a Linux/Unix server or vice versa.
+ </p>]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_CONTAINER_TEMP_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Container temp directory]]>
+      </doc>
+    </field>
+    <field name="IS_MINI_YARN_CLUSTER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_MC_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_MINICLUSTER_FIXED_PORTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether to use fixed ports with the minicluster.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_MINICLUSTER_FIXED_PORTS" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default is false to be able to run tests concurrently without port
+ conflicts.]]>
+      </doc>
+    </field>
+    <field name="YARN_MINICLUSTER_USE_RPC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether the NM should use RPC to connect to the RM. Default is false.
+ Can be set to true only when using fixed ports.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_MINICLUSTER_USE_RPC" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Whether users are explicitly trying to control resource monitoring
+ configuration for the MiniYARNCluster. Disabled by default.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_MINICLUSTER_NM_PMEM_MB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Allow changing the memory for the NodeManager in the MiniYARNCluster]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_MINICLUSTER_NM_PMEM_MB" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_APP_CONTAINER_LOG_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The log directory for the containers]]>
+      </doc>
+    </field>
+    <field name="YARN_APP_CONTAINER_LOG_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_APP_CONTAINER_LOG_BACKUPS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_VERSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_VERSION" type="float"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_VERSIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_UI_NAMES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Comma separated list of names for UIs hosted in the timeline server
+ (For pluggable UIs).]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_UI_WEB_PATH_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Relative web path that will serve up this UI (For pluggable UIs).]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline client settings.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_UI_ON_DISK_PATH_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Path to war file or static content directory for this UI
+ (For pluggable UIs).]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The setting for timeline service v1.5]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CACHE_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITY_GROUP_PLUGIN_CLASSES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITY_GROUP_PLUGIN_CLASSPATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITY_GROUP_PLUGIN_SYSTEM_CLASSES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SUMMARY_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SUMMARY_ENTITY_TYPES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SCAN_INTERVAL_SECONDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_SCAN_INTERVAL_SECONDS_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_THREADS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_THREADS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_APP_CACHE_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_APP_CACHE_SIZE_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CLEANER_INTERVAL_SECONDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CLEANER_INTERVAL_SECONDS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETAIN_SECONDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETAIN_SECONDS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_UNKNOWN_ACTIVE_SECONDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_UNKNOWN_ACTIVE_SECONDS_DEFAULT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_LEVELDB_CACHE_READ_CACHE_SIZE" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_FD_FLUSH_INTERVAL_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_FD_FLUSH_INTERVAL_SECS_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_FD_CLEAN_INTERVAL_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_FD_CLEAN_INTERVAL_SECS_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_FD_RETAIN_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_FD_RETAIN_SECS_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_INTERNAL_TIMERS_TTL_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_INTERNAL_TIMERS_TTL_SECS_DEFAULT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_INTERNAL_ATTEMPT_DIR_CACHE_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_CLIENT_INTERNAL_ATTEMPT_DIR_CACHE_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYFILE_FS_SUPPORT_APPEND" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_WITH_USER_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_READER_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Settings for timeline service v2.0.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_WRITER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_WRITER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_READER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_READER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_SCHEMA_CREATOR_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_SCHEMA_CREATOR_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[default schema prefix for hbase tables.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_HBASE_SCHEMA_PREFIX_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[config param name to override schema prefix.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_WRITER_FLUSH_INTERVAL_SECONDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The setting that controls how often the timeline collector flushes the
+ timeline writer.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_WRITER_FLUSH_INTERVAL_SECONDS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_WRITER_ASYNC_QUEUE_CAPACITY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The setting that controls the capacity of the queue for async writes
+ to timeline collector.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_WRITER_ASYNC_QUEUE_CAPACITY" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="APP_FINAL_VALUE_RETENTION_THRESHOLD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The name for setting that controls how long the final value of
+ a metric of a completed app is retained before merging
+ into the flow sum.]]>
+      </doc>
+    </field>
+    <field name="FLOW_RUN_COPROCESSOR_JAR_HDFS_LOCATION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The name of the setting for the location of the coprocessor
+ jar on hdfs.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_HDFS_LOCATION_FLOW_RUN_COPROCESSOR_JAR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[default hdfs location for flowrun coprocessor jar.]]>
+      </doc>
+    </field>
+    <field name="FLOW_NAME_MAX_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[This setting controls the max size of the flow name getting generated
+ in ATSv2 after removing UUID if present.]]>
+      </doc>
+    </field>
+    <field name="FLOW_NAME_DEFAULT_MAX_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default setting for flow name size has no size restriction
+ after removing UUID if present.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The name for setting that points to an optional HBase configuration
+ (hbase-site.xml file) with settings that will override the ones found on
+ the classpath.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_READ_AUTH_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The name for setting that enables or disables authentication checks
+ for reading timeline service v2 data.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_AGGREGATION_INTERVAL_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The name for setting that controls how often in-memory app level
+ aggregation is kicked off in timeline collector.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_AGGREGATION_INTERVAL_SECS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_READ_AUTH_ENABLED" type="java.lang.Boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default setting for authentication checks for reading timeline
+ service v2 data.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_READ_ALLOWED_USERS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The name for setting that lists the users and groups who are allowed
+ to read timeline service v2 data. It is a comma separated list of
+ user, followed by space, then comma separated list of groups.
+ It will allow this list of users and groups to read the data
+ and reject everyone else.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_READ_ALLOWED_USERS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The default value for list of the users who are allowed to read
+ timeline service v2 data.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_APP_FINAL_VALUE_RETENTION_THRESHOLD" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The setting that controls how long the final value of a metric of a
+ completed app is retained before merging into the flow sum. Up to this time
+ after an application is completed out-of-order values that arrive can be
+ recognized and discarded at the cost of increased storage.]]>
+      </doc>
+    </field>
+    <field name="ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NUMBER_OF_ASYNC_ENTITIES_TO_MERGE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NUMBER_OF_ASYNC_ENTITIES_TO_MERGE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FLOW_VERSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[default version for any flow.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_V2_CLIENT_DRAIN_TIME_MILLIS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The time period for which timeline v2 client will wait for draining
+ leftover entities after stop.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_V2_CLIENT_DRAIN_TIME_MILLIS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="APPLICATION_HISTORY_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="APPLICATION_HISTORY_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The setting that controls whether application history service is
+  enabled or not.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_APPLICATION_HISTORY_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="APPLICATION_HISTORY_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Application history store class]]>
+      </doc>
+    </field>
+    <field name="APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Save container meta-info in the application history store.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_APPLICATION_HISTORY_STORE_URI" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[URI for FileSystemApplicationHistoryStore]]>
+      </doc>
+    </field>
+    <field name="FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[T-file compression types used to compress history data.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_FS_APPLICATION_HISTORY_STORE_COMPRESSION_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The setting that controls whether timeline service is enabled or not.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[host:port address for timeline service RPC APIs.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_BIND_HOST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The listening endpoint for the timeline service application.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_BIND_HOST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_HANDLER_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of threads to handle client RPC API requests.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_CLIENT_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The address of the timeline service web application.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_WEBAPP_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The https address of the timeline service web application.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_WEBAPP_HTTPS_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="APPLICATION_HISTORY_MAX_APPS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Defines the max number of applications could be fetched using
+ REST API or application history protocol and shown in timeline
+ server web ui.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_APPLICATION_HISTORY_MAX_APPS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_STORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service store class.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_TTL_ENABLE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Flag to enable timeline service data age-off.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_TTL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service length of time to retain data]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_TTL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_ROLLING_PERIOD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service rolling period. Valid values are daily, half_daily,
+ quarter_daily, and hourly.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_ROLLING_PERIOD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Roll a new database each hour.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_LEVELDB_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Implementation specific configuration prefix for Timeline Service
+ leveldb.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_LEVELDB_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service leveldb path]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service leveldb read cache (uncompressed blocks). This is
+ per rolling instance so should be tuned if using rolling leveldb
+ timeline store]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default leveldb read cache size if no configuration is specified.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service leveldb write buffer size.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default leveldb write buffer size if no configuration is specified. This
+ is per rolling instance so should be tuned if using rolling leveldb
+ timeline store.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service leveldb write batch size. This value can be tuned down
+ to reduce lock time for ttl eviction.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default leveldb write batch size if no configuration is specified.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service leveldb start time read cache (number of entities)]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service leveldb start time write cache (number of entities)]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service leveldb interval to wait between deletion rounds]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service leveldb number of concurrent open files. Tune this
+ configuration to stay within system limits. This is per rolling instance
+ so should be tuned if using rolling leveldb timeline store.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default leveldb max open files if no configuration is specified.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_PRINCIPAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The Kerberos principal for the timeline server.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_KEYTAB" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The Kerberos keytab for the timeline server.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Enables cross origin support for timeline server.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_HTTP_CROSS_ORIGIN_ENABLED_DEFAULT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default value for cross origin support for timeline server.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_MAX_RETRIES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline client call, max retries (-1 means no limit)]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_CLIENT_MAX_RETRIES" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline client call, retry interval]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_CLIENT_RETRY_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_CLIENT_BEST_EFFORT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline client policy for whether connections are fatal]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_CLIENT_BEST_EFFORT" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_RECOVERY_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Flag to enable recovery of timeline service]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_RECOVERY_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_STATE_STORE_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service state store class]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_LEVELDB_STATE_STORE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_LEVELDB_STATE_STORE_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Timeline service state store leveldb path]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_DELEGATION_KEY_UPDATE_INTERVAL" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_DELEGATION_TOKEN_RENEW_INTERVAL" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_DELEGATION_TOKEN_MAX_LIFETIME" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_OFFLINE_AGGREGATION_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="PHOENIX_OFFLINE_STORAGE_CONN_STR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="PHOENIX_OFFLINE_STORAGE_CONN_STR_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SHARED_CACHE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SHARED_CACHE_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[whether the shared cache is enabled/disabled]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SHARED_CACHE_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SHARED_CACHE_ROOT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The config key for the shared cache root directory.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SHARED_CACHE_ROOT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SHARED_CACHE_NESTED_LEVEL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The config key for the level of nested directories before getting to the
+ checksum directory.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SHARED_CACHE_NESTED_LEVEL" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_STORE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_STORE_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_SCM_STORE_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_APP_CHECKER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_SCM_APP_CHECKER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_ADMIN_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The address of the SCM admin interface.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SCM_ADMIN_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_SCM_ADMIN_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_ADMIN_CLIENT_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Number of threads used to handle SCM admin interface.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SCM_ADMIN_CLIENT_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The address of the SCM web application.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SCM_WEBAPP_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_SCM_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IN_MEMORY_STORE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IN_MEMORY_STALENESS_PERIOD_MINS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[A resource in the InMemorySCMStore is considered stale if the time since
+ the last reference exceeds the staleness period. This value is specified in
+ minutes.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_IN_MEMORY_STALENESS_PERIOD_MINS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IN_MEMORY_INITIAL_DELAY_MINS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initial delay before the in-memory store runs its first check to remove
+ dead initial applications. Specified in minutes.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_IN_MEMORY_INITIAL_DELAY_MINS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="IN_MEMORY_CHECK_PERIOD_MINS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The frequency at which the in-memory store checks to remove dead initial
+ applications. Specified in minutes.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_IN_MEMORY_CHECK_PERIOD_MINS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_CLEANER_PERIOD_MINS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The frequency at which a cleaner task runs. Specified in minutes.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SCM_CLEANER_PERIOD_MINS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_CLEANER_INITIAL_DELAY_MINS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Initial delay before the first cleaner task is scheduled. Specified in
+ minutes.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SCM_CLEANER_INITIAL_DELAY_MINS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_CLEANER_RESOURCE_SLEEP_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The time to sleep between processing each shared cache resource. Specified
+ in milliseconds.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SCM_CLEANER_RESOURCE_SLEEP_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_UPLOADER_SERVER_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The address of the node manager interface in the SCM.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SCM_UPLOADER_SERVER_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_SCM_UPLOADER_SERVER_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_UPLOADER_SERVER_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of SCM threads used to handle notify requests from the node
+ manager.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SCM_UPLOADER_SERVER_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_CLIENT_SERVER_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The address of the client interface in the SCM.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SCM_CLIENT_SERVER_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_SCM_CLIENT_SERVER_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCM_CLIENT_SERVER_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of threads used to handle shared cache manager requests.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SCM_CLIENT_SERVER_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SHARED_CACHE_CHECKSUM_ALGO_IMPL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[the checksum algorithm implementation]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SHARED_CACHE_CHECKSUM_ALGO_IMPL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SHARED_CACHE_NM_UPLOADER_REPLICATION_FACTOR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The replication factor for the node manager uploader for the shared cache.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_SHARED_CACHE_NM_UPLOADER_REPLICATION_FACTOR" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SHARED_CACHE_NM_UPLOADER_THREAD_COUNT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_SHARED_CACHE_NM_UPLOADER_THREAD_COUNT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_FAILOVER_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_FAILOVER_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_STATESTORE_CLIENT_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_STATESTORE_CLIENT_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_CACHE_TIME_TO_LIVE_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_FLUSH_CACHE_FOR_RM_ADDR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_FLUSH_CACHE_FOR_RM_ADDR" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_REGISTRY_BASE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_REGISTRY_BASE_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_STATESTORE_HEARTBEAT_INTERVAL_SECS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_MACHINE_LIST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_CLUSTER_RESOLVER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_CLUSTER_RESOLVER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_AMRMPROXY_HB_MAX_WAIT_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_AMRMPROXY_HB_MAX_WAIT_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_AMRMPROXY_SUBCLUSTER_TIMEOUT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_AMRMPROXY_SUBCLUSTER_TIMEOUT" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_POLICY_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_POLICY_MANAGER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_POLICY_MANAGER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_POLICY_MANAGER_PARAMS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_POLICY_MANAGER_PARAMS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_STATESTORE_ZK_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_STATESTORE_ZK_PARENT_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Parent znode path under which ZKRMStateStore will create znodes.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_FEDERATION_STATESTORE_ZK_PARENT_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_STATESTORE_SQL_USERNAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_STATESTORE_SQL_PASSWORD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_STATESTORE_SQL_URL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_STATESTORE_SQL_JDBC_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_STATESTORE_SQL_JDBC_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FEDERATION_STATESTORE_SQL_MAXCONNECTIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FEDERATION_STATESTORE_SQL_MAXCONNECTIONS" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_BIND_HOST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_CLIENTRM_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_CLIENTRM_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ROUTER_CLIENTRM_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ROUTER_CLIENTRM_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_CLIENTRM_INTERCEPTOR_CLASS_PIPELINE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ROUTER_CLIENTRM_INTERCEPTOR_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_PIPELINE_CACHE_MAX_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ROUTER_PIPELINE_CACHE_MAX_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_RMADMIN_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_RMADMIN_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ROUTER_RMADMIN_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ROUTER_RMADMIN_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_RMADMIN_INTERCEPTOR_CLASS_PIPELINE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ROUTER_RMADMIN_INTERCEPTOR_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_CLIENTRM_SUBMIT_RETRY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The number of retries for GetNewApplication and SubmitApplication in
+ {@code FederationClientInterceptor}.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_ROUTER_CLIENTRM_SUBMIT_RETRY" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_WEBAPP_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_USER_CLIENT_THREADS_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ROUTER_USER_CLIENT_THREADS_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The address of the Router web application.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_ROUTER_WEBAPP_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ROUTER_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_WEBAPP_HTTPS_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The https address of the Router web application.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_ROUTER_WEBAPP_HTTPS_PORT" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ROUTER_WEBAPP_HTTPS_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_WEBAPP_INTERCEPTOR_CLASS_PIPELINE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ROUTER_WEBAPP_INTERCEPTOR_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The interceptor class used in FederationInterceptorREST to communicate with
+ each SubCluster.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The interceptor class used in FederationInterceptorREST should return
+ partial AppReports.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CSI_ADAPTOR_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[TERMS:
+ csi-driver: a 3rd party CSI driver which implements the CSI protocol.
+   It is provided by the storage system.
+ csi-driver-adaptor: this is an internal RPC service working
+   as a bridge between YARN and a csi-driver.]]>
+      </doc>
+    </field>
+    <field name="NM_CSI_DRIVER_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CSI_DRIVER_ENDPOINT_SUFFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CSI_ADAPTOR_ADDRESS_SUFFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CSI_ADAPTOR_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CSI_ADAPTOR_ADDRESSES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[One or more socket addresses for csi-adaptor.
+ Multiple addresses are delimited by ",".]]>
+      </doc>
+    </field>
+    <field name="NM_CSI_DRIVER_NAMES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_REGISTRY_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_YARN_REGISTRY_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Use YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS instead.
+ The interval of the yarn client's querying application state after
+ application submission. The unit is millisecond.]]>
+      </doc>
+    </field>
+    <field name="YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The interval that the yarn client library uses to poll the completion
+ status of the asynchronous API of application client protocol.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The duration that the yarn client library waits, cumulatively across polls,
+ for an expected state change to occur. Defaults to -1, which indicates no
+ limit.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Max number of threads in NMClientAsync to process container management
+ events]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CLIENT_MAX_NM_PROXIES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Maximum number of proxy connections to cache for node managers. If set
+ to a value greater than zero then the cache is enabled and the NMClient
+ and MRAppMaster will cache the specified number of node manager proxies.
+ There will be at max one proxy per node manager. Ex. configuring it to a
+ value of 5 will make sure that client will at max have 5 proxies cached
+ with 5 different node managers. These connections for these proxies will
+ be timed out if idle for more than the system wide idle timeout period.
+ Note that this could cause issues on large clusters as many connections
+ could linger simultaneously and lead to a large number of connection
+ threads. The token used for authentication will be used only at
+ connection creation time. If a new token is received then the earlier
+ connection should be closed in order to use the new token. This and
+ {@link YarnConfiguration#NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE} are related
+ and should be in sync (no need for them to be equal).
+ If the value of this property is zero then the connection cache is
+ disabled and connections will use a zero idle timeout to prevent too
+ many connection threads on large clusters.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_CLIENT_MAX_NM_PROXIES" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CLIENT_NM_CONNECT_MAX_WAIT_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Max time to wait to establish a connection to NM]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_CLIENT_NM_CONNECT_MAX_WAIT_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CLIENT_NM_CONNECT_RETRY_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Time interval between each attempt to connect to NM]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_CLIENT_NM_CONNECT_RETRY_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_HTTP_POLICY_KEY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_HTTP_POLICY_DEFAULT" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_RESOURCEMANAGER_CONNECT_MAX_WAIT_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Max time to wait for NM to connection to RM.
+ When not set, proxy will fall back to use value of
+ RESOURCEMANAGER_CONNECT_MAX_WAIT_MS.]]>
+      </doc>
+    </field>
+    <field name="NM_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Time interval between each NM attempt to connection to RM.
+ When not set, proxy will fall back to use value of
+ RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS.]]>
+      </doc>
+    </field>
+    <field name="NODE_LABELS_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Node-labels configurations]]>
+      </doc>
+    </field>
+    <field name="FS_NODE_LABELS_STORE_IMPL_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Node label store implementation class]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_FS_NODE_LABELS_STORE_IMPL_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FS_NODE_LABELS_STORE_ROOT_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[URI for NodeLabelManager]]>
+      </doc>
+    </field>
+    <field name="NODE_ATTRIBUTE_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Node-attribute configurations.]]>
+      </doc>
+    </field>
+    <field name="FS_NODE_ATTRIBUTE_STORE_IMPL_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Node attribute store implementation class.]]>
+      </doc>
+    </field>
+    <field name="FS_NODE_ATTRIBUTE_STORE_ROOT_DIR" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[File system node attribute store directory.]]>
+      </doc>
+    </field>
+    <field name="NODE_LABELS_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Flag to indicate if the node labels feature enabled, by default it's
+ disabled]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NODE_LABELS_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NODELABEL_CONFIGURATION_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CENTRALIZED_NODELABEL_CONFIGURATION_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DELEGATED_CENTALIZED_NODELABEL_CONFIGURATION_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DISTRIBUTED_NODELABEL_CONFIGURATION_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NODELABEL_CONFIGURATION_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="EXCLUSIVE_ENFORCED_PARTITIONS_SUFFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="EXCLUSIVE_ENFORCED_PARTITIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_CLUSTER_LEVEL_APPLICATION_PRIORITY" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="APP_ATTEMPT_DIAGNOSTICS_LIMIT_KC" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_APP_ATTEMPT_DIAGNOSTICS_LIMIT_KC" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NODE_LABELS_PROVIDER_CONFIG" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NODE_ATTRIBUTES_PROVIDER_CONFIG" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CONFIG_NODE_DESCRIPTOR_PROVIDER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="SCRIPT_NODE_DESCRIPTOR_PROVIDER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NODE_LABELS_RESYNC_INTERVAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_NODE_LABELS_RESYNC_INTERVAL" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NODE_ATTRIBUTES_RESYNC_INTERVAL" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_NODE_ATTRIBUTES_RESYNC_INTERVAL" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NODE_LABELS_PROVIDER_FETCH_TIMEOUT_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_NODE_LABELS_PROVIDER_FETCH_TIMEOUT_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_PROVIDER_CONFIGURED_NODE_PARTITION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_PROVIDER_CONFIGURED_NODE_ATTRIBUTES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NODE_LABELS_PROVIDER_CONFIG" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_RM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="AM_SCHEDULING_NODE_BLACKLISTING_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_AM_SCHEDULING_NODE_BLACKLISTING_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="AM_SCHEDULING_NODE_BLACKLISTING_DISABLE_THRESHOLD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_AM_SCHEDULING_NODE_BLACKLISTING_DISABLE_THRESHOLD" type="float"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_SCRIPT_OPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Node attribute provider fetch attributes interval and timeout.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_NODE_ATTRIBUTES_PROVIDER_FETCH_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NODE_ATTRIBUTES_PROVIDER_FETCH_TIMEOUT_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_NODE_ATTRIBUTES_PROVIDER_FETCH_TIMEOUT_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_PATH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_SCRIPT_BASED_NODE_ATTRIBUTES_PROVIDER_OPTS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DISPLAY_APPS_FOR_LOGGED_IN_USER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILTER_ENTITY_LIST_BY_USER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FILTER_INVALID_XML_CHARS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FILTER_INVALID_XML_CHARS" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="ENABLE_REST_APP_SUBMISSIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_ENABLE_REST_APP_SUBMISSIONS" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="REST_CSRF" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_CSRF_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CSRF_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_CSRF_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_CSRF_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CSRF_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_CSRF_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_CSRF_CUSTOM_HEADER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CSRF_CUSTOM_HEADER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_CSRF_CUSTOM_HEADER" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_CSRF_METHODS_TO_IGNORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CSRF_METHODS_TO_IGNORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_CSRF_METHODS_TO_IGNORE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="XFS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="YARN_XFS_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_XFS_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_XFS_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_XFS_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_XFS_OPTIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_XFS_OPTIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_XFS_OPTIONS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_READER_BIND_HOST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Settings for timeline reader.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_READER_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_READER_WEBAPP_HTTPS_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_READER_STORAGE_MONITOR_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_STORAGE_MONITOR_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_COLLECTOR_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Marked collector properties as Private since it runs as an auxiliary service.]]>
+      </doc>
+    </field>
+    <field name="TIMELINE_SERVICE_COLLECTOR_BIND_HOST" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_COLLECTOR_BIND_PORT_RANGES" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_COLLECTOR_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TIMELINE_SERVICE_COLLECTOR_WEBAPP_HTTPS_ADDRESS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NUMA_AWARENESS_ENABLED" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Settings for NUMA awareness.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_NM_NUMA_AWARENESS_ENABLED" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NUMA_AWARENESS_READ_TOPOLOGY" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_NUMA_AWARENESS_READ_TOPOLOGY" type="boolean"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NUMA_AWARENESS_NODE_IDS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_NUMA_AWARENESS_NUMACTL_CMD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_NM_NUMA_AWARENESS_NUMACTL_CMD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ACTIVITIES_MANAGER_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Settings for activities manager.]]>
+      </doc>
+    </field>
+    <field name="RM_ACTIVITIES_MANAGER_SCHEDULER_ACTIVITIES_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ACTIVITIES_MANAGER_APP_ACTIVITIES_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ACTIVITIES_MANAGER_CLEANUP_INTERVAL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[The cleanup interval for activities in milliseconds.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_ACTIVITIES_MANAGER_CLEANUP_INTERVAL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ACTIVITIES_MANAGER_SCHEDULER_ACTIVITIES_TTL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Time to live for scheduler activities in milliseconds.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_ACTIVITIES_MANAGER_SCHEDULER_ACTIVITIES_TTL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ACTIVITIES_MANAGER_APP_ACTIVITIES_TTL_MS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Time to live for app activities in milliseconds.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_ACTIVITIES_MANAGER_APP_ACTIVITIES_TTL_MS" type="long"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="RM_ACTIVITIES_MANAGER_APP_ACTIVITIES_MAX_QUEUE_LENGTH" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Max queue length for app activities.]]>
+      </doc>
+    </field>
+    <field name="DEFAULT_RM_ACTIVITIES_MANAGER_APP_ACTIVITIES_MAX_QUEUE_LENGTH" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="NM_CONTAINERS_LAUNCHER_CLASS" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Containers launcher implementation to use.]]>
+      </doc>
+    </field>
+    <field name="YARN_WORKFLOW_ID_TAG_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_YARN_WORKFLOW_ID_TAG_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.conf.YarnConfiguration -->
+</package>
+<package name="org.apache.hadoop.yarn.exceptions">
+  <!-- start class org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException -->
+  <class name="ApplicationAttemptNotFoundException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationAttemptNotFoundException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ApplicationAttemptNotFoundException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ApplicationAttemptNotFoundException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This exception is thrown on
+ {@link ApplicationHistoryProtocol#getApplicationAttemptReport (GetApplicationAttemptReportRequest)}
+ API when the Application Attempt doesn't exist in Application History Server or
+ {@link ApplicationMasterProtocol#allocate(AllocateRequest)} if application
+ doesn't exist in RM.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException -->
+  <!-- start class org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException -->
+  <class name="ApplicationIdNotProvidedException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationIdNotProvidedException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ApplicationIdNotProvidedException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ApplicationIdNotProvidedException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Exception to be thrown when Client submit an application without
+ providing {@link ApplicationId} in {@link ApplicationSubmissionContext}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException -->
+  <!-- start class org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException -->
+  <class name="ApplicationNotFoundException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationNotFoundException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ApplicationNotFoundException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ApplicationNotFoundException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This exception is thrown on
+ {@link ApplicationClientProtocol#getApplicationReport
+ (GetApplicationReportRequest)} API
+ when the Application doesn't exist in RM and AHS.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException -->
+  <!-- start class org.apache.hadoop.yarn.exceptions.ConfigurationException -->
+  <class name="ConfigurationException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ConfigurationException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ConfigurationException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ConfigurationException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This exception is thrown on unrecoverable configuration errors.
+ An example is container launch error due to configuration.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.exceptions.ConfigurationException -->
+  <!-- start class org.apache.hadoop.yarn.exceptions.ContainerNotFoundException -->
+  <class name="ContainerNotFoundException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerNotFoundException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerNotFoundException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerNotFoundException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This exception is thrown on
+ {@link ApplicationHistoryProtocol#getContainerReport (GetContainerReportRequest)}
+ API when the container doesn't exist in AHS.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.exceptions.ContainerNotFoundException -->
+  <!-- start class org.apache.hadoop.yarn.exceptions.ResourceNotFoundException -->
+  <class name="ResourceNotFoundException" extends="org.apache.hadoop.yarn.exceptions.YarnRuntimeException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ResourceNotFoundException" type="org.apache.hadoop.yarn.api.records.Resource, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ResourceNotFoundException" type="org.apache.hadoop.yarn.api.records.Resource, java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ResourceNotFoundException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This exception is thrown when details of an unknown resource type
+ are requested.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.exceptions.ResourceNotFoundException -->
+  <!-- start class org.apache.hadoop.yarn.exceptions.YarnException -->
+  <class name="YarnException" extends="java.lang.Exception"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="YarnException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="YarnException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="YarnException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[YarnException indicates exceptions from yarn servers. On the other hand,
+ IOExceptions indicates exceptions from RPC layer.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.exceptions.YarnException -->
+  <!-- start class org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException -->
+  <class name="YARNFeatureNotEnabledException" extends="org.apache.hadoop.yarn.exceptions.YarnException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YARNFeatureNotEnabledException" type="java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="YARNFeatureNotEnabledException" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="YARNFeatureNotEnabledException" type="java.lang.String, java.lang.Throwable"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[This exception is thrown when a feature is being used which is not enabled
+ yet.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api">
+  <!-- start class org.apache.hadoop.yarn.server.api.ApplicationInitializationContext -->
+  <class name="ApplicationInitializationContext" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationInitializationContext" type="java.lang.String, org.apache.hadoop.yarn.api.records.ApplicationId, java.nio.ByteBuffer"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getUser" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the user-name of the application-submitter
+ 
+ @return user-name]]>
+      </doc>
+    </method>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get {@link ApplicationId} of the application
+ 
+ @return applications ID]]>
+      </doc>
+    </method>
+    <method name="getApplicationDataForService" return="java.nio.ByteBuffer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the data sent to the NodeManager via
+ {@link ContainerManagementProtocol#startContainers(StartContainersRequest)}
+ as part of {@link ContainerLaunchContext#getServiceData()}
+ 
+ @return the servicesData for this application.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Initialization context for {@link AuxiliaryService} when starting an
+ application.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.ApplicationInitializationContext -->
+  <!-- start class org.apache.hadoop.yarn.server.api.ApplicationTerminationContext -->
+  <class name="ApplicationTerminationContext" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationTerminationContext" type="org.apache.hadoop.yarn.api.records.ApplicationId"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get {@link ApplicationId} of the application being stopped.
+ 
+ @return applications ID]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Initialization context for {@link AuxiliaryService} when stopping an
+ application.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.ApplicationTerminationContext -->
+  <!-- start interface org.apache.hadoop.yarn.server.api.AuxiliaryLocalPathHandler -->
+  <interface name="AuxiliaryLocalPathHandler"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getLocalPathForRead" return="org.apache.hadoop.fs.Path"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a path from the local FS for reading for a given Auxiliary Service.
+ @param path the requested path
+ @return the complete path to the file on a local disk
+ @throws IOException if the file read encounters a problem]]>
+      </doc>
+    </method>
+    <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a path from the local FS for writing for a given Auxiliary Service.
+ @param path the requested path
+ @return the complete path to the file on a local disk
+ @throws IOException if the path creations fails]]>
+      </doc>
+    </method>
+    <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="java.lang.String"/>
+      <param name="size" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a path from the local FS for writing a file of an estimated size
+ for a given Auxiliary Service.
+ @param path the requested path
+ @param size the size of the file that is going to be written
+ @return the complete path to the file on a local disk
+ @throws IOException if the path creations fails]]>
+      </doc>
+    </method>
+    <method name="getAllLocalPathsForRead" return="java.lang.Iterable"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get all paths from the local FS for reading for a given Auxiliary Service.
+ @param path the requested path
+ @return the complete path list to the file on a local disk as an Iterable
+ @throws IOException if the file read encounters a problem]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[An Interface that can retrieve local directories to read from or write to.
+  Components can implement this interface to link it to
+  their own Directory Handler Service]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.AuxiliaryLocalPathHandler -->
+  <!-- start class org.apache.hadoop.yarn.server.api.AuxiliaryService -->
+  <class name="AuxiliaryService" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AuxiliaryService" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getRecoveryPath" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the path specific to this auxiliary service to use for recovery.
+
+ @return state storage path or null if recovery is not enabled]]>
+      </doc>
+    </method>
+    <method name="initializeApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="initAppContext" type="org.apache.hadoop.yarn.server.api.ApplicationInitializationContext"/>
+      <doc>
+      <![CDATA[A new application is started on this NodeManager. This is a signal to
+ this {@link AuxiliaryService} about the application initialization.
+ 
+ @param initAppContext context for the application's initialization]]>
+      </doc>
+    </method>
+    <method name="stopApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="stopAppContext" type="org.apache.hadoop.yarn.server.api.ApplicationTerminationContext"/>
+      <doc>
+      <![CDATA[An application is finishing on this NodeManager. This is a signal to this
+ {@link AuxiliaryService} about the same.
+ 
+ @param stopAppContext context for the application termination]]>
+      </doc>
+    </method>
+    <method name="getMetaData" return="java.nio.ByteBuffer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Retrieve meta-data for this {@link AuxiliaryService}. Applications using
+ this {@link AuxiliaryService} SHOULD know the format of the meta-data -
+ ideally each service should provide a method to parse out the information
+ to the applications. One example of meta-data is contact information so
+ that applications can access the service remotely. This will only be called
+ after the service's {@link #start()} method has finished. the result may be
+ cached.
+ 
+ <p>
+ The information is passed along to applications via
+ {@link StartContainersResponse#getAllServicesMetaData()} that is returned by
+ {@link ContainerManagementProtocol#startContainers(StartContainersRequest)}
+ </p>
+ 
+ @return meta-data for this service that should be made available to
+         applications.]]>
+      </doc>
+    </method>
+    <method name="initializeContainer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="initContainerContext" type="org.apache.hadoop.yarn.server.api.ContainerInitializationContext"/>
+      <doc>
+      <![CDATA[A new container is started on this NodeManager. This is a signal to
+ this {@link AuxiliaryService} about the container initialization.
+ This method is called when the NodeManager receives the container launch
+ command from the ApplicationMaster and before the container process is 
+ launched.
+
+ @param initContainerContext context for the container's initialization]]>
+      </doc>
+    </method>
+    <method name="stopContainer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="stopContainerContext" type="org.apache.hadoop.yarn.server.api.ContainerTerminationContext"/>
+      <doc>
+      <![CDATA[A container is finishing on this NodeManager. This is a signal to this
+ {@link AuxiliaryService} about the same.
+
+ @param stopContainerContext context for the container termination]]>
+      </doc>
+    </method>
+    <method name="setRecoveryPath"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="recoveryPath" type="org.apache.hadoop.fs.Path"/>
+      <doc>
+      <![CDATA[Set the path for this auxiliary service to use for storing state
+ that will be used during recovery.
+
+ @param recoveryPath where recoverable state should be stored]]>
+      </doc>
+    </method>
+    <method name="getAuxiliaryLocalPathHandler" return="org.apache.hadoop.yarn.server.api.AuxiliaryLocalPathHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Method that gets the local dirs path handler for this Auxiliary Service.
+
+ @return auxiliaryPathHandler object that is used to read from and write to
+ valid local Dirs.]]>
+      </doc>
+    </method>
+    <method name="setAuxiliaryLocalPathHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="auxiliaryLocalPathHandler" type="org.apache.hadoop.yarn.server.api.AuxiliaryLocalPathHandler"/>
+      <doc>
+      <![CDATA[Method that sets the local dirs path handler for this Auxiliary Service.
+
+ @param auxiliaryLocalPathHandler the pathHandler for this auxiliary service]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A generic service that will be started by the NodeManager. This is a service
+ that administrators have to configure on each node by setting
+ {@link YarnConfiguration#NM_AUX_SERVICES}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.AuxiliaryService -->
+  <!-- start class org.apache.hadoop.yarn.server.api.ContainerContext -->
+  <class name="ContainerContext" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerContext" type="java.lang.String, org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.Resource"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerContext" type="java.lang.String, org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.Resource, org.apache.hadoop.yarn.server.api.ContainerType"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerContext" type="java.lang.String, org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.Resource, org.apache.hadoop.yarn.server.api.ContainerType, org.apache.hadoop.yarn.api.records.ExecutionType"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getUser" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get user of the container being initialized or stopped.
+
+ @return the user]]>
+      </doc>
+    </method>
+    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get {@link ContainerId} of the container being initialized or stopped.
+
+ @return the container ID]]>
+      </doc>
+    </method>
+    <method name="getResource" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get {@link Resource} the resource capability allocated to the container
+ being initialized or stopped.
+
+ @return the resource capability.]]>
+      </doc>
+    </method>
+    <method name="getContainerType" return="org.apache.hadoop.yarn.server.api.ContainerType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get {@link ContainerType} the type of the container
+ being initialized or stopped.
+
+ @return the type of the container]]>
+      </doc>
+    </method>
+    <method name="getExecutionType" return="org.apache.hadoop.yarn.api.records.ExecutionType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get {@link ExecutionType} the execution type of the container
+ being initialized or stopped.
+
+ @return the execution type of the container]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Base context class for {@link AuxiliaryService} initializing and stopping a
+ container.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.ContainerContext -->
+  <!-- start class org.apache.hadoop.yarn.server.api.ContainerInitializationContext -->
+  <class name="ContainerInitializationContext" extends="org.apache.hadoop.yarn.server.api.ContainerContext"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerInitializationContext" type="java.lang.String, org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.Resource"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerInitializationContext" type="java.lang.String, org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.Resource, org.apache.hadoop.yarn.server.api.ContainerType"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Initialization context for {@link AuxiliaryService} when starting a
+ container.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.ContainerInitializationContext -->
+  <!-- start interface org.apache.hadoop.yarn.server.api.ContainerLogAggregationPolicy -->
+  <interface name="ContainerLogAggregationPolicy"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="parseParameters"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="parameters" type="java.lang.String"/>
+      <doc>
+      <![CDATA[<p>
+ The method used by the NodeManager log aggregation service
+ to initial the policy object with parameters specified by the application
+ or the cluster-wide setting.
+ </p>
+
+ @param parameters parameters with scheme defined by the policy class.]]>
+      </doc>
+    </method>
+    <method name="shouldDoLogAggregation" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="logContext" type="org.apache.hadoop.yarn.server.api.ContainerLogContext"/>
+      <doc>
+      <![CDATA[<p>
+ The method used by the NodeManager log aggregation service
+ to ask the policy object if a given container's logs should be aggregated.
+ </p>
+
+ @param logContext ContainerLogContext
+ @return Whether or not the container's logs should be aggregated.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This API is used by NodeManager to decide if a given container's logs
+ should be aggregated at run time.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.ContainerLogAggregationPolicy -->
+  <!-- start class org.apache.hadoop.yarn.server.api.ContainerLogContext -->
+  <class name="ContainerLogContext" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerLogContext" type="org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.server.api.ContainerType, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get {@link ContainerId} of the container.
+
+ @return the container ID]]>
+      </doc>
+    </method>
+    <method name="getContainerType" return="org.apache.hadoop.yarn.server.api.ContainerType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get {@link ContainerType} the type of the container.
+
+ @return the type of the container]]>
+      </doc>
+    </method>
+    <method name="getExitCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the exit code of the container.
+
+ @return the exit code]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Context class for {@link ContainerLogAggregationPolicy}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.ContainerLogContext -->
+  <!-- start class org.apache.hadoop.yarn.server.api.ContainerTerminationContext -->
+  <class name="ContainerTerminationContext" extends="org.apache.hadoop.yarn.server.api.ContainerContext"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerTerminationContext" type="java.lang.String, org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.Resource"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTerminationContext" type="java.lang.String, org.apache.hadoop.yarn.api.records.ContainerId, org.apache.hadoop.yarn.api.records.Resource, org.apache.hadoop.yarn.server.api.ContainerType"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[Termination context for {@link AuxiliaryService} when stopping a
+ container.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.ContainerTerminationContext -->
+  <!-- start interface org.apache.hadoop.yarn.server.api.SCMAdminProtocol -->
+  <interface name="SCMAdminProtocol"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="runCleanerTask" return="org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.server.api.protocolrecords.RunSharedCacheCleanerTaskRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The method used by administrators to ask SCM to run cleaner task right away
+ </p>
+
+ @param request request <code>SharedCacheManager</code> to run a cleaner task
+ @return <code>SharedCacheManager</code> returns an empty response
+         on success and throws an exception on rejecting the request
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[<p>
+ The protocol between administrators and the <code>SharedCacheManager</code>
+ </p>]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.server.api.SCMAdminProtocol -->
+</package>
+<package name="org.apache.hadoop.yarn.util">
+</package>
+<package name="org.apache.hadoop.yarn.util.constraint">
+  <!-- start class org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser -->
+  <class name="PlacementConstraintParser" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <method name="parseExpression" return="org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="constraintStr" type="java.lang.String"/>
+      <exception name="PlacementConstraintParseException" type="org.apache.hadoop.yarn.util.constraint.PlacementConstraintParseException"/>
+      <doc>
+      <![CDATA[Parses a given constraint expression to a {@link AbstractConstraint},
+ this expression can be any valid form of constraint expressions.
+
+ @param constraintStr expression string
+ @return a parsed {@link AbstractConstraint}
+ @throws PlacementConstraintParseException when given expression
+ is malformed]]>
+      </doc>
+    </method>
+    <method name="parsePlacementSpec" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="expression" type="java.lang.String"/>
+      <exception name="PlacementConstraintParseException" type="org.apache.hadoop.yarn.util.constraint.PlacementConstraintParseException"/>
+      <doc>
+      <![CDATA[Parses a placement constraint specification. A placement constraint spec
+ is a composite expression which is composed by multiple sub constraint
+ expressions delimited by ":". With following syntax:
+
+ <p>Tag1(N1),P1:Tag2(N2),P2:...:TagN(Nn),Pn</p>
+
+ where <b>TagN(Nn)</b> is a key value pair to determine the source
+ allocation tag and the number of allocations, such as:
+
+ <p>foo(3)</p>
+
+ Optional when using NodeAttribute Constraint.
+
+ and where <b>Pn</b> can be any form of a valid constraint expression,
+ such as:
+
+ <ul>
+   <li>in,node,foo,bar</li>
+   <li>notin,node,foo,bar,1,2</li>
+   <li>and(notin,node,foo:notin,node,bar)</li>
+ </ul>
+
+ and NodeAttribute Constraint such as
+
+ <ul>
+   <li>yarn.rm.io/foo=true</li>
+   <li>java=1.7,1.8</li>
+ </ul>
+ @param expression expression string.
+ @return a map of source tags to placement constraint mapping.
+ @throws PlacementConstraintParseException]]>
+      </doc>
+    </method>
+    <field name="EXPRESSION_VAL_DELIM" type="char"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Placement constraint expression parser.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.util.constraint.PlacementConstraintParser -->
+</package>
+<package name="org.apache.hadoop.yarn.util.csi">
+</package>
+<package name="org.apache.hadoop.yarn.util.resource">
+</package>
+
+</api>
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.3.4.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.3.4.xml
new file mode 100644
index 0000000..aa23d55
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.3.4.xml
@@ -0,0 +1,3067 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Fri Jul 29 14:13:01 GMT 2022 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop YARN Client 3.3.4"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/jdiff.jar -verbose -classpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/classes:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.3.4.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-protobuf_3_7/1.1.1/hadoop-shaded-protobuf_3_7-1.1.1.jar:/maven/com/google/guava/guava/27.0-jre/guava-27.0-jre.jar:/maven/com/google/guava/failureaccess/1.0/failureaccess-1.0.jar:/maven/com/google/guava/listenablefuture/9999.0-empty-to-avoid-conflict-with-guava/listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar:/maven/org/checkerframework/checker-qual/2.5.2/checker-qual-2.5.2.jar:/maven/com/google/j2objc/j2objc-annotations/1.1/j2objc-annotations-1.1.jar:/maven/org/codehaus/mojo/animal-sniffer-annotations/1.17/animal-sniffer-annotations-1.17.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/org/apache/httpcomponents/httpclient/4.5.13/httpclient-4.5.13.jar:/maven/org/apache/httpcomponents/httpcore/4.4.13/httpcore-4.4.13.jar:/maven/commons-codec/commons-codec/1.15/commons-codec-1.15.jar:/maven/commons-io/commons-io/2.8.0/commons-io-2.8.0.jar:/maven/commons-net/commons-net/3.6/commons-net-3.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/jakarta/activation/jakarta.activation-api/1.2.1/jakarta.activation-api-1.2.1.jar:/maven/org/eclipse/jetty/jetty-server/9.4.43.v20210629/jetty-server-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-http/9.4.43.v20210629/jetty-http-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-util/9.4.43.v20210629/jetty-util-9.4.43.v20210629.jar:/maven/org/ec
lipse/jetty/jetty-servlet/9.4.43.v20210629/jetty-servlet-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-security/9.4.43.v20210629/jetty-security-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-webapp/9.4.43.v20210629/jetty-webapp-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-xml/9.4.43.v20210629/jetty-xml-9.4.43.v20210629.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-beanutils/commons-beanutils/1.9.4/commons-beanutils-1.9.4.jar:/maven/org/apache/commons/commons-configuration2/2.1.1/commons-configuration2-2.1.1.jar:/maven/org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar:/maven/org/apache/commons/commons-text/1.4/commons-text-1.4.jar:/maven/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/com/google/re2j/re2j/1.1/re2j-1.1.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/com/google/code/gson/gson/2.8.9/gson-2.8.9.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.3.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/9.8.1/nimbus-jose-jwt-9.8.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotation
s-1.0-1.jar:/maven/net/minidev/json-smart/2.4.7/json-smart-2.4.7.jar:/maven/net/minidev/accessors-smart/2.4.7/accessors-smart-2.4.7.jar:/maven/org/ow2/asm/asm/5.0.4/asm-5.0.4.jar:/maven/org/apache/curator/curator-framework/4.2.0/curator-framework-4.2.0.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.1/kerb-simplekdc-1.0.1.jar:/maven/org/apache/kerby/kerb-client/1.0.1/kerb-client-1.0.1.jar:/maven/org/apache/kerby/kerby-config/1.0.1/kerby-config-1.0.1.jar:/maven/org/apache/kerby/kerb-common/1.0.1/kerb-common-1.0.1.jar:/maven/org/apache/kerby/kerb-crypto/1.0.1/kerb-crypto-1.0.1.jar:/maven/org/apache/kerby/kerb-util/1.0.1/kerb-util-1.0.1.jar:/maven/org/apache/kerby/token-provider/1.0.1/token-provider-1.0.1.jar:/maven/org/apache/kerby/kerb-admin/1.0.1/kerb-admin-1.0.1.jar:/maven/org/apache/kerby/kerb-server/1.0.1/kerb-server-1.0.1.jar:/maven/org/apache/kerby/kerb-identity/1.0.1/kerb-identity-1.0.1.jar:/maven/org/apache/kerby/kerby-xdr/1.0.1/kerby-xdr-1.0.1.jar:/maven/com/jcraft/jsch/0.1.55/jsch-0.1.55.jar:/maven/org/apache/curator/curator-client/4.2.0/curator-client-4.2.0.jar:/maven/org/apache/curator/curator-recipes/4.2.0/curator-recipes-4.2.0.jar:/maven/com/google/code/findbugs/jsr305/3.0.2/jsr305-3.0.2.jar:/maven/org/apache/zookeeper/zookeeper/3.5.6/zookeeper-3.5.6.jar:/maven/org/apache/commons/commons-compress/1.21/commons-compress-1.21.jar:/maven/org/apache/kerby/kerb-core/1.0.1/kerb-core-1.0.1.jar:/maven/org/apache/kerby/kerby-pkix/1.0.1/kerby-pkix-1.0.1.jar:/maven/org/apache/kerby/kerby-asn1/1.0.1/kerby-asn1-1.0.1.jar:/maven/org/apache/kerby/kerby-util/1.0.1/kerby-util-1.0.1.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.12.7/jackson-databind-2.12.7.jar:/maven/org/codehaus/woodstox/stax2-api/4.2.1/stax2-api-4.2.1.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.3.0/woodstox-core-5.3.0.jar:/maven/dnsjava/dnsjava/2.1.7/dnsjava-2.1.7.jar:/maven/org/xerial/snappy/snappy-java/1.1.8.2/snappy-java-1.1.8.2.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-gu
ava/1.1.1/hadoop-shaded-guava-1.1.1.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/ch/qos/reload4j/reload4j/1.2.22/reload4j-1.2.22.jar:/maven/org/eclipse/jetty/websocket/websocket-client/9.4.43.v20210629/websocket-client-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-client/9.4.43.v20210629/jetty-client-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-io/9.4.43.v20210629/jetty-io-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/websocket/websocket-common/9.4.43.v20210629/websocket-common-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/websocket/websocket-api/9.4.43.v20210629/websocket-api-9.4.43.v20210629.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.3.4.jar:/usr/lib/jvm/java-8-openjdk-amd64/lib/tools.jar:/maven/com/sun/jersey/jersey-client/1.19/jersey-client-1.19.jar:/maven/org/apache/zookeeper/zookeeper-jute/3.5.6/zookeeper-jute-3.5.6.jar:/maven/org/apache/yetus/audience-annotations/0.5.0/audience-annotations-0.5.0.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-3.3.4.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.12.7/jackson-annotations-2.12.7.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-3.3.4.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-3.3.4.jar:/maven/com/squareup/okhttp3/okhttp/4.9.3/okhttp-4.9.3.jar:/maven/com/squareup/okio/okio/2.8.0/okio-2.8.0.jar:/maven/org/jetbrains/kotlin/kotlin-stdlib/1.4.10/kotlin-stdlib-1.4.10.jar:/maven/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.10/kotlin-stdlib-common-1.4.10.jar:/maven/com/google/inject/extensions/guice-servlet/4.0/guice-servlet-4.0.jar:/maven/com/google/inject/guice/4.0/guice-4.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.19/jersey-guice-
1.19.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.12.7/jackson-core-2.12.7.jar:/maven/com/fasterxml/jackson/module/jackson-module-jaxb-annotations/2.12.7/jackson-module-jaxb-annotations-2.12.7.jar:/maven/jakarta/xml/bind/jakarta.xml.bind-api/2.3.2/jakarta.xml.bind-api-2.3.2.jar:/maven/com/fasterxml/jackson/jaxrs/jackson-jaxrs-json-provider/2.12.7/jackson-jaxrs-json-provider-2.12.7.jar:/maven/com/fasterxml/jackson/jaxrs/jackson-jaxrs-base/2.12.7/jackson-jaxrs-base-2.12.7.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/org/eclipse/jetty/jetty-util-ajax/9.4.43.v20210629/jetty-util-ajax-9.4.43.v20210629.jar:/maven/org/jline/jline/3.9.0/jline-3.9.0.jar:/maven/xerces/xercesImpl/2.12.2/xercesImpl-2.12.2.jar:/maven/xml-apis/xml-apis/1.4.01/xml-apis-1.4.01.jar -sourcepath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/jdiff.jar -apidir /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/target/site/jdiff/xml -apiname Apache Hadoop YARN Client 3.3.4 -->
+<package name="org.apache.hadoop.yarn.client">
+</package>
+<package name="org.apache.hadoop.yarn.client.api">
+  <!-- start class org.apache.hadoop.yarn.client.api.AHSClient -->
+  <class name="AHSClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AHSClient" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAHSClient" return="org.apache.hadoop.yarn.client.api.AHSClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of AHSClient.]]>
+      </doc>
+    </method>
+    <method name="createAHSv2Client" return="org.apache.hadoop.yarn.client.api.AHSClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get a report of the given Application.
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+   <li>host - set to "N/A"</li>
+   <li>RPC port - set to -1</li>
+   <li>client token - set to "N/A"</li>
+   <li>diagnostics - set to "N/A"</li>
+   <li>tracking URL - set to "N/A"</li>
+   <li>original tracking URL - set to "N/A"</li>
+   <li>resource usage report - all values are -1</li>
+ </ul>
+ 
+ @param appId
+          {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of all Applications in the cluster.
+ </p>
+ 
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+ 
+ @return a list of reports for all applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given ApplicationAttempt.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the application attempt that needs
+          a report
+ @return application attempt report
+ @throws YarnException
+ @throws ApplicationAttemptNotFoundException if application attempt
+         not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all (ApplicationAttempts) of Application in the cluster.
+ </p>
+ 
+ @param applicationId
+ @return a list of reports for all application attempts for specified
+         application
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Container.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ @param containerId
+          {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws ContainerNotFoundException if container not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all (Containers) of ApplicationAttempt in the cluster.
+ </p>
+ 
+ @param applicationAttemptId
+ @return a list of reports of all containers for specified application
+         attempt
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AHSClient -->
+  <!-- start class org.apache.hadoop.yarn.client.api.AMRMClient -->
+  <class name="AMRMClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAMRMClient" return="org.apache.hadoop.yarn.client.api.AMRMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of AMRMClient.
+ For usage:
+ <pre>
+ {@code
+ AMRMClient.<T>createAMRMClientContainerRequest()
+ }</pre>
+ @return the newly created AMRMClient instance.]]>
+      </doc>
+    </method>
+    <method name="addSchedulingRequests"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="schedulingRequests" type="java.util.Collection"/>
+      <doc>
+      <![CDATA[Add a Collection of SchedulingRequests. The AMRMClient will ensure that
+ all requests in the same batch are sent in the same allocate call.
+ @param schedulingRequests Collection of Scheduling Requests.]]>
+      </doc>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appHostName" type="java.lang.String"/>
+      <param name="appHostPort" type="int"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Register the application master. This must be called before any 
+ other interaction
+ @param appHostName Name of the host on which master is running
+ @param appHostPort Port master is listening on
+ @param appTrackingUrl URL at which the master info can be seen
+ @return <code>RegisterApplicationMasterResponse</code>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appHostName" type="java.lang.String"/>
+      <param name="appHostPort" type="int"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <param name="placementConstraints" type="java.util.Map"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Register the application master. This must be called before any
+ other interaction
+ @param appHostName Name of the host on which master is running
+ @param appHostPort Port master is listening on
+ @param appTrackingUrl URL at which the master info can be seen
+ @param placementConstraints Placement Constraints mappings.
+ @return <code>RegisterApplicationMasterResponse</code>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="allocate" return="org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="progressIndicator" type="float"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Request additional containers and receive new container allocations.
+ Requests made via <code>addContainerRequest</code> are sent to the
+ <code>ResourceManager</code>. New containers assigned to the master are
+ retrieved. Status of completed containers and node health updates are also
+ retrieved. This also doubles up as a heartbeat to the ResourceManager and
+ must be made periodically. The call may not always return any new
+ allocations of containers. App should not make concurrent allocate
+ requests. May cause request loss.
+ 
+ <p>
+ Note : If the user has not removed container requests that have already
+ been satisfied, then the re-register may end up sending the entire
+ container requests to the RM (including matched requests). Which would mean
+ the RM could end up giving it a lot of new allocated containers.
+ </p>
+ 
+ @param progressIndicator Indicates progress made by the master
+ @return the response of the allocate request
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="unregisterApplicationMaster"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="appMessage" type="java.lang.String"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unregister the application master. This must be called in the end.
+ @param appStatus Success/Failure status of the master
+ @param appMessage Diagnostics message on failure
+ @param appTrackingUrl New URL to get master info
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="addContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Request containers for resources before calling <code>allocate</code>
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="removeContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Remove previous container request. The previous container request may have 
+ already been sent to the ResourceManager. So even after the remove request 
+ the app must be prepared to receive an allocation for the previous request 
+ even after the remove request
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="requestContainerResourceChange"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use
+ {@link #requestContainerUpdate(Container, UpdateContainerRequest)}">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Request container resource change before calling <code>allocate</code>.
+ Any previous pending resource change request of the same container will be
+ removed.
+
+ Application that calls this method is expected to maintain the
+ <code>Container</code>s that are returned from previous successful
+ allocations or resource changes. By passing in the existing container and a
+ target resource capability to this method, the application requests the
+ ResourceManager to change the existing resource allocation to the target
+ resource allocation.
+
+ @deprecated use
+ {@link #requestContainerUpdate(Container, UpdateContainerRequest)}
+
+ @param container The container returned from the last successful resource
+                  allocation or resource change
+ @param capability  The target resource capability of the container]]>
+      </doc>
+    </method>
+    <method name="requestContainerUpdate"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <param name="updateContainerRequest" type="org.apache.hadoop.yarn.api.records.UpdateContainerRequest"/>
+      <doc>
+      <![CDATA[Request a container update before calling <code>allocate</code>.
+ Any previous pending update request of the same container will be
+ removed.
+
+ @param container The container returned from the last successful resource
+                  allocation or update
+ @param updateContainerRequest The <code>UpdateContainerRequest</code>.]]>
+      </doc>
+    </method>
+    <method name="releaseAssignedContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[Release containers assigned by the Resource Manager. If the app cannot use
+ the container or wants to give up the container then it can release them.
+ The app needs to make new requests for the released resource capability if
+ it still needs it. eg. it released non-local resources
+ @param containerId]]>
+      </doc>
+    </method>
+    <method name="getAvailableResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the currently available resources in the cluster.
+ A valid value is available after a call to allocate has been made
+ @return Currently available resources]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeCount" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current number of nodes in the cluster.
+ A valid value is available after a call to allocate has been made
+ @return Current number of nodes in the cluster]]>
+      </doc>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Get outstanding <code>ContainerRequest</code>s matching the given 
+ parameters. These ContainerRequests should have been added via
+ <code>addContainerRequest</code> earlier in the lifecycle. For performance,
+ the AMRMClient may return its internal collection directly without creating 
+ a copy. Users should not perform mutable operations on the return value.
+ Each collection in the list contains requests with identical 
+ <code>Resource</code> size that fit in the given capability. In a 
+ collection, requests will be returned in the same order as they were added.
+
+ NOTE: This API only matches Container requests that were created by the
+ client WITHOUT the allocationRequestId being set.
+
+ @return Collection of request matching the parameters]]>
+      </doc>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="executionType" type="org.apache.hadoop.yarn.api.records.ExecutionType"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Get outstanding <code>ContainerRequest</code>s matching the given
+ parameters. These ContainerRequests should have been added via
+ <code>addContainerRequest</code> earlier in the lifecycle. For performance,
+ the AMRMClient may return its internal collection directly without creating
+ a copy. Users should not perform mutable operations on the return value.
+ Each collection in the list contains requests with identical
+ <code>Resource</code> size that fit in the given capability. In a
+ collection, requests will be returned in the same order as they were added.
+ This variant additionally matches on the given <code>ExecutionType</code>.
+
+ NOTE: This API only matches Container requests that were created by the
+ client WITHOUT the allocationRequestId being set.
+
+ @param priority Priority
+ @param resourceName Location
+ @param executionType ExecutionType
+ @param capability Capability
+ @return Collection of request matching the parameters]]>
+      </doc>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="executionType" type="org.apache.hadoop.yarn.api.records.ExecutionType"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <param name="profile" type="java.lang.String"/>
+    </method>
+    <method name="getMatchingRequests" return="java.util.Collection"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allocationRequestId" type="long"/>
+      <doc>
+      <![CDATA[Get outstanding <code>ContainerRequest</code>s matching the given
+ allocationRequestId. These ContainerRequests should have been added via
+ <code>addContainerRequest</code> earlier in the lifecycle. For performance,
+ the AMRMClient may return its internal collection directly without creating
+ a copy. Users should not perform mutable operations on the return value.
+
+ NOTE: This API only matches Container requests that were created by the
+ client WITH the allocationRequestId being set to a non-default value.
+
+ @param allocationRequestId Allocation Request Id
+ @return Collection of request matching the parameters]]>
+      </doc>
+    </method>
+    <method name="updateBlacklist"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blacklistAdditions" type="java.util.List"/>
+      <param name="blacklistRemovals" type="java.util.List"/>
+      <doc>
+      <![CDATA[Update application's blacklist with addition or removal resources.
+ 
+ @param blacklistAdditions list of resources which should be added to the 
+        application blacklist
+ @param blacklistRemovals list of resources which should be removed from the 
+        application blacklist]]>
+      </doc>
+    </method>
+    <method name="setNMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nmTokenCache" type="org.apache.hadoop.yarn.client.api.NMTokenCache"/>
+      <doc>
+      <![CDATA[Set the NM token cache for the <code>AMRMClient</code>. This cache must
+ be shared with the {@link NMClient} used to manage containers for the
+ <code>AMRMClient</code>
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+      </doc>
+    </method>
+    <method name="getNMTokenCache" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the NM token cache of the <code>AMRMClient</code>. This cache must be
+ shared with the {@link NMClient} used to manage containers for the
+ <code>AMRMClient</code>.
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache.]]>
+      </doc>
+    </method>
+    <method name="registerTimelineV2Client"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="client" type="org.apache.hadoop.yarn.client.api.TimelineV2Client"/>
+      <doc>
+      <![CDATA[Register TimelineV2Client to AMRMClient. Writer's address for the timeline
+ V2 client will be updated dynamically if registered.
+
+ @param client the timeline v2 client to register]]>
+      </doc>
+    </method>
+    <method name="getRegisteredTimelineV2Client" return="org.apache.hadoop.yarn.client.api.TimelineV2Client"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get registered timeline v2 client.
+ @return the registered timeline v2 client]]>
+      </doc>
+    </method>
+    <method name="updateTrackingUrl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="trackingUrl" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Update application's tracking url on next heartbeat.
+
+ @param trackingUrl new tracking url for this application]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="java.util.function.Supplier"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true for each 1000 ms.
+ See also {@link #waitFor(java.util.function.Supplier, int)}
+ and {@link #waitFor(java.util.function.Supplier, int, int)}
+ @param check the condition for which it should wait]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="java.util.function.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true for each
+ <code>checkEveryMillis</code> ms.
+ See also {@link #waitFor(java.util.function.Supplier, int, int)}
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="java.util.function.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <param name="logInterval" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true for each
+ <code>checkEveryMillis</code> ms. In the main loop, this method will log
+ the message "waiting in main loop" for each <code>logInterval</code> times
+ iteration to confirm the thread is alive.
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>
+ @param logInterval interval to log for each]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AMRMClient -->
+  <!-- start class org.apache.hadoop.yarn.client.api.NMClient -->
+  <class name="NMClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createNMClient" return="org.apache.hadoop.yarn.client.api.NMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of NMClient.]]>
+      </doc>
+    </method>
+    <method name="createNMClient" return="org.apache.hadoop.yarn.client.api.NMClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create a new instance of NMClient.]]>
+      </doc>
+    </method>
+    <method name="startContainer" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <param name="containerLaunchContext" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Start an allocated container.</p>
+
+ <p>The <code>ApplicationMaster</code> or other applications that use the
+ client must provide the details of the allocated container, including the
+ Id, the assigned node's Id and the token via {@link Container}. In
+ addition, the AM needs to provide the {@link ContainerLaunchContext} as
+ well.</p>
+
+ @param container the allocated container
+ @param containerLaunchContext the context information needed by the
+                               <code>NodeManager</code> to launch the
+                               container
+ @return a map between the auxiliary service names and their outputs
+ @throws YarnException YarnException.
+ @throws IOException IOException.]]>
+      </doc>
+    </method>
+    <method name="increaseContainerResource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Increase the resource of a container.</p>
+
+ <p>The <code>ApplicationMaster</code> or other applications that use the
+ client must provide the details of the container, including the Id and
+ the target resource encapsulated in the updated container token via
+ {@link Container}.
+ </p>
+
+ @param container the container with updated token.
+
+ @throws YarnException YarnException.
+ @throws IOException IOException.]]>
+      </doc>
+    </method>
+    <method name="updateContainerResource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Update the resources of a container.</p>
+
+ <p>The <code>ApplicationMaster</code> or other applications that use the
+ client must provide the details of the container, including the Id and
+ the target resource encapsulated in the updated container token via
+ {@link Container}.
+ </p>
+
+ @param container the container with updated token.
+
+ @throws YarnException YarnException.
+ @throws IOException IOException.]]>
+      </doc>
+    </method>
+    <method name="stopContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Stop a started container.</p>
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the <code>NodeManager</code>
+
+ @throws YarnException YarnException.
+ @throws IOException IOException.]]>
+      </doc>
+    </method>
+    <method name="getContainerStatus" return="org.apache.hadoop.yarn.api.records.ContainerStatus"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Query the status of a container.</p>
+
+ @param containerId the Id of the started container
+ @param nodeId the Id of the <code>NodeManager</code>
+ 
+ @return the status of a container.
+
+ @throws YarnException YarnException.
+ @throws IOException IOException.]]>
+      </doc>
+    </method>
+    <method name="reInitializeContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="containerLaunchContex" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="autoCommit" type="boolean"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Re-Initialize the Container.</p>
+
+ @param containerId the Id of the container to Re-Initialize.
+ @param containerLaunchContex the updated ContainerLaunchContext.
+ @param autoCommit commit re-initialization automatically ?
+
+ @throws YarnException YarnException.
+ @throws IOException IOException.]]>
+      </doc>
+    </method>
+    <method name="restartContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Restart the specified container.</p>
+
+ @param containerId the Id of the container to restart.
+
+ @throws YarnException YarnException.
+ @throws IOException IOException.]]>
+      </doc>
+    </method>
+    <method name="rollbackLastReInitialization"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Rollback last reInitialization of the specified container.</p>
+
+ @param containerId the Id of the container whose last re-initialization is to be rolled back.
+
+ @throws YarnException YarnException.
+ @throws IOException IOException.]]>
+      </doc>
+    </method>
+    <method name="commitLastReInitialization"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>Commit last reInitialization of the specified container.</p>
+
+ @param containerId the Id of the container to commit reInitialize.
+
+ @throws YarnException YarnException.
+ @throws IOException IOException.]]>
+      </doc>
+    </method>
+    <method name="cleanupRunningContainersOnStop"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="enabled" type="boolean"/>
+      <doc>
+      <![CDATA[<p>Set whether the containers that are started by this client, and are
+ still running should be stopped when the client stops. By default, the
+ feature should be enabled.</p> However, containers will be stopped only  
+ when service is stopped. i.e. after {@link NMClient#stop()}. 
+
+ @param enabled whether the feature is enabled or not]]>
+      </doc>
+    </method>
+    <method name="setNMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nmTokenCache" type="org.apache.hadoop.yarn.client.api.NMTokenCache"/>
+      <doc>
+      <![CDATA[Set the NM Token cache of the <code>NMClient</code>. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this <code>NMClient</code>
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @param nmTokenCache the NM token cache to use.]]>
+      </doc>
+    </method>
+    <method name="getNMTokenCache" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the NM token cache of the <code>NMClient</code>. This cache must be
+ shared with the {@link AMRMClient} that requested the containers managed
+ by this <code>NMClient</code>
+ <p>
+ If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
+ singleton instance will be used.
+
+ @return the NM token cache]]>
+      </doc>
+    </method>
+    <method name="getNodeIdOfStartedContainer" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[Get the NodeId of the node on which the container is running. It returns
+ null if the container is not found or if it is not running.
+
+ @param containerId Container Id of the container.
+ @return NodeId of the container on which it is running.]]>
+      </doc>
+    </method>
+    <method name="localize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="localResources" type="java.util.Map"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Localize resources for a container.
+ @param containerId     the ID of the container
+ @param nodeId          node Id of the container
+ @param localResources  resources to localize]]>
+      </doc>
+    </method>
+    <method name="getLocalizationStatuses" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the localization statuses of a container.
+
+ @param containerId   the Id of the container
+ @param nodeId        node Id of the container
+
+ @return the status of a container.
+
+ @throws YarnException YarnException.
+ @throws IOException IOException.]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.NMClient -->
+  <!-- start class org.apache.hadoop.yarn.client.api.NMTokenCache -->
+  <class name="NMTokenCache" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMTokenCache"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates a NM token cache instance.]]>
+      </doc>
+    </constructor>
+    <method name="getSingleton" return="org.apache.hadoop.yarn.client.api.NMTokenCache"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the singleton NM token cache.
+
+ @return the singleton NM token cache.]]>
+      </doc>
+    </method>
+    <method name="getNMToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns NMToken, null if absent. Only the singleton obtained from
+ {@link #getSingleton()} is looked at for the tokens. If you are using your
+ own NMTokenCache that is different from the singleton, use
+ {@link #getToken(String) }
+ 
+ @param nodeAddr
+ @return {@link Token} NMToken required for communicating with node manager]]>
+      </doc>
+    </method>
+    <method name="setNMToken"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <doc>
+      <![CDATA[Sets the NMToken for node address only in the singleton obtained from
+ {@link #getSingleton()}. If you are using your own NMTokenCache that is
+ different from the singleton, use {@link #setToken(String, Token) }
+ 
+ @param nodeAddr
+          node address (host:port)
+ @param token
+          NMToken]]>
+      </doc>
+    </method>
+    <method name="getToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Returns NMToken, null if absent
+ @param nodeAddr
+ @return {@link Token} NMToken required for communicating with node
+         manager]]>
+      </doc>
+    </method>
+    <method name="setToken"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeAddr" type="java.lang.String"/>
+      <param name="token" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <doc>
+      <![CDATA[Sets the NMToken for node address
+ @param nodeAddr node address (host:port)
+ @param token NMToken]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[NMTokenCache manages NMTokens required for an Application Master
+ communicating with individual NodeManagers.
+ <p>
+ By default YARN client libraries {@link AMRMClient} and {@link NMClient} use
+ {@link #getSingleton()} instance of the cache.
+ <ul>
+   <li>
+     Using the singleton instance of the cache is appropriate when running a
+     single ApplicationMaster in the same JVM.
+   </li>
+   <li>
+     When using the singleton, users don't need to do anything special,
+     {@link AMRMClient} and {@link NMClient} are already set up to use the
+     default singleton {@link NMTokenCache}
+     </li>
+ </ul>
+ If running multiple Application Masters in the same JVM, a different cache
+ instance should be used for each Application Master.
+ <ul>
+   <li>
+     If using the {@link AMRMClient} and the {@link NMClient}, setting up
+     and using an instance cache is as follows:
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   AMRMClient rmClient = AMRMClient.createAMRMClient();
+   NMClient nmClient = NMClient.createNMClient();
+   nmClient.setNMTokenCache(nmTokenCache);
+   ...
+ </pre>
+   </li>
+   <li>
+     If using the {@link AMRMClientAsync} and the {@link NMClientAsync},
+     setting up and using an instance cache is as follows:
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   AMRMClient rmClient = AMRMClient.createAMRMClient();
+   NMClient nmClient = NMClient.createNMClient();
+   nmClient.setNMTokenCache(nmTokenCache);
+   AMRMClientAsync rmClientAsync = new AMRMClientAsync(rmClient, 1000, [AMRM_CALLBACK]);
+   NMClientAsync nmClientAsync = new NMClientAsync("nmClient", nmClient, [NM_CALLBACK]);
+   ...
+ </pre>
+   </li>
+   <li>
+     If using {@link ApplicationMasterProtocol} and
+     {@link ContainerManagementProtocol} directly, setting up and using an
+     instance cache is as follows:
+ <pre>
+   NMTokenCache nmTokenCache = new NMTokenCache();
+   ...
+   ApplicationMasterProtocol amPro = ClientRMProxy.createRMProxy(conf, ApplicationMasterProtocol.class);
+   ...
+   AllocateRequest allocateRequest = ...
+   ...
+   AllocateResponse allocateResponse = rmClient.allocate(allocateRequest);
+   for (NMToken token : allocateResponse.getNMTokens()) {
+     nmTokenCache.setToken(token.getNodeId().toString(), token.getToken());
+   }
+   ...
+   ContainerManagementProtocolProxy nmPro = ContainerManagementProtocolProxy(conf, nmTokenCache);
+   ...
+   nmPro.startContainer(container, containerContext);
+   ...
+ </pre>
+   </li>
+ </ul>
+ It is also possible to mix the usage of a client ({@code AMRMClient} or
+ {@code NMClient}, or the async versions of them) with a protocol proxy
+ ({@code ContainerManagementProtocolProxy} or
+ {@code ApplicationMasterProtocol}).]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.NMTokenCache -->
+  <!-- start class org.apache.hadoop.yarn.client.api.SharedCacheClient -->
+  <class name="SharedCacheClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SharedCacheClient" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createSharedCacheClient" return="org.apache.hadoop.yarn.client.api.SharedCacheClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="use" return="org.apache.hadoop.yarn.api.records.URL"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="resourceKey" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ The method to claim a resource with the <code>SharedCacheManager.</code>
+ The client uses a checksum to identify the resource and an
+ {@link ApplicationId} to identify which application will be using the
+ resource.
+ </p>
+
+ <p>
+ The <code>SharedCacheManager</code> responds with whether or not the
+ resource exists in the cache. If the resource exists, a <code>URL</code> to
+ the resource in the shared cache is returned. If the resource does not
+ exist, null is returned instead.
+ </p>
+
+ <p>
+ Once a URL has been returned for a resource, that URL is safe to use for
+ the lifetime of the application that corresponds to the provided
+ ApplicationId.
+ </p>
+
+ @param applicationId ApplicationId of the application using the resource
+ @param resourceKey the key (i.e. checksum) that identifies the resource
+ @return URL to the resource, or null if it does not exist]]>
+      </doc>
+    </method>
+    <method name="release"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="resourceKey" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ The method to release a resource with the <code>SharedCacheManager.</code>
+ This method is called once an application is no longer using a claimed
+ resource in the shared cache. The client uses a checksum to identify the
+ resource and an {@link ApplicationId} to identify which application is
+ releasing the resource.
+ </p>
+ 
+ <p>
+ Note: This method is an optimization and the client is not required to call
+ it for correctness.
+ </p>
+ 
+ @param applicationId ApplicationId of the application releasing the
+          resource
+ @param resourceKey the key (i.e. checksum) that identifies the resource]]>
+      </doc>
+    </method>
+    <method name="getFileChecksum" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="sourceFile" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[A convenience method to calculate the checksum of a specified file.
+ 
+ @param sourceFile A path to the input file
+ @return A hex string containing the checksum digest
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[This is the client for YARN's shared cache.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.SharedCacheClient -->
+  <!-- start class org.apache.hadoop.yarn.client.api.YarnClient -->
+  <class name="YarnClient" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createYarnClient" return="org.apache.hadoop.yarn.client.api.YarnClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of YarnClient.]]>
+      </doc>
+    </method>
+    <method name="createApplication" return="org.apache.hadoop.yarn.client.api.YarnClientApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Obtain a {@link YarnClientApplication} for a new application,
+ which in turn contains the {@link ApplicationSubmissionContext} and
+ {@link org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse}
+ objects.
+ </p>
+
+ @return {@link YarnClientApplication} built for a new application
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="submitApplication" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appContext" type="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Submit a new application to <code>YARN.</code> It is a blocking call - it
+ will not return {@link ApplicationId} until the submitted application is
+ submitted successfully and accepted by the ResourceManager.
+ </p>
+ 
+ <p>
+ Users should provide an {@link ApplicationId} as part of the parameter
+ {@link ApplicationSubmissionContext} when submitting a new application,
+ otherwise it will throw the {@link ApplicationIdNotProvidedException}.
+ </p>
+
+ <p>This internally calls {@link ApplicationClientProtocol#submitApplication
+ (SubmitApplicationRequest)}, and after that, it internally invokes
+ {@link ApplicationClientProtocol#getApplicationReport
+ (GetApplicationReportRequest)} and waits till it can make sure that the
+ application gets properly submitted. If RM fails over or RM restart
+ happens before ResourceManager saves the application's state,
+ {@link ApplicationClientProtocol
+ #getApplicationReport(GetApplicationReportRequest)} will throw
+ the {@link ApplicationNotFoundException}. This API automatically resubmits
+ the application with the same {@link ApplicationSubmissionContext} when it
+ catches the {@link ApplicationNotFoundException}</p>
+
+ @param appContext
+          {@link ApplicationSubmissionContext} containing all the details
+          needed to submit a new application
+ @return {@link ApplicationId} of the accepted application
+ @throws YarnException
+ @throws IOException
+ @see #createApplication()]]>
+      </doc>
+    </method>
+    <method name="failApplicationAttempt"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Fail an application attempt identified by given ID.
+ </p>
+
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the attempt to fail.
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException
+ @see #getQueueAclsInfo()]]>
+      </doc>
+    </method>
+    <method name="killApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Kill an application identified by given ID.
+ </p>
+ 
+ @param applicationId
+          {@link ApplicationId} of the application that needs to be killed
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException
+ @see #getQueueAclsInfo()]]>
+      </doc>
+    </method>
+    <method name="killApplication"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="diagnostics" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Kill an application identified by given ID.
+ </p>
+ @param applicationId {@link ApplicationId} of the application that needs to
+          be killed
+ @param diagnostics for killing an application.
+ @throws YarnException in case of errors or if YARN rejects the request due
+           to access-control restrictions.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationReport" return="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Application.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ <p>
+ If the user does not have <code>VIEW_APP</code> access then the following
+ fields in the report will be set to stubbed values:
+ <ul>
+ <li>host - set to "N/A"</li>
+ <li>RPC port - set to -1</li>
+ <li>client token - set to "N/A"</li>
+ <li>diagnostics - set to "N/A"</li>
+ <li>tracking URL - set to "N/A"</li>
+ <li>original tracking URL - set to "N/A"</li>
+ <li>resource usage report - all values are -1</li>
+ </ul>
+ 
+ @param appId
+          {@link ApplicationId} of the application that needs a report
+ @return application report
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAMRMToken" return="org.apache.hadoop.security.token.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the AMRM token of the application.
+ <p>
+ The AMRM token is required for AM to RM scheduling operations. For 
+ managed Application Masters YARN takes care of injecting it. For unmanaged
+ Application Masters, the token must be obtained via this method and set
+ in the {@link org.apache.hadoop.security.UserGroupInformation} of the
+ current user.
+ <p>
+ The AMRM token will be returned only if all the following conditions are
+ met:
+ <ul>
+   <li>the requester is the owner of the ApplicationMaster</li>
+   <li>the application master is an unmanaged ApplicationMaster</li>
+   <li>the application master is in ACCEPTED state</li>
+ </ul>
+ Else this method returns NULL.
+
+ @param appId {@link ApplicationId} of the application to get the AMRM token
+ @return the AMRM token if available
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of all Applications in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @return a list of reports of all running applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications
+ matching the given application types in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationTypes set of application types you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given
+ application states in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationStates set of application states you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given
+ application types and application states in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationTypes set of application types you are interested in
+ @param applicationStates set of application states you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationTypes" type="java.util.Set"/>
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <param name="applicationTags" type="java.util.Set"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given
+ application types, application states and application tags in the cluster.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param applicationTypes set of application types you are interested in
+ @param applicationStates set of application states you are interested in
+ @param applicationTags set of application tags you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queues" type="java.util.Set"/>
+      <param name="users" type="java.util.Set"/>
+      <param name="applicationTypes" type="java.util.Set"/>
+      <param name="applicationStates" type="java.util.EnumSet"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report (ApplicationReport) of Applications matching the given users,
+ queues, application types and application states in the cluster. If any of
+ the params is set to null, it is not used when filtering.
+ </p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param queues set of queues you are interested in
+ @param users set of users you are interested in
+ @param applicationTypes set of application types you are interested in
+ @param applicationStates set of application states you are interested in
+ @return a list of reports of applications
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplications" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a list of ApplicationReports that match the given
+ {@link GetApplicationsRequest}.
+</p>
+
+ <p>
+ If the user does not have <code>VIEW_APP</code> access for an application
+ then the corresponding report will be filtered as described in
+ {@link #getApplicationReport(ApplicationId)}.
+ </p>
+
+ @param request the request object to get the list of applications.
+ @return The list of ApplicationReports that match the request
+ @throws YarnException Exception specific to YARN.
+ @throws IOException Exception mostly related to connection errors.]]>
+      </doc>
+    </method>
+    <method name="getYarnClusterMetrics" return="org.apache.hadoop.yarn.api.records.YarnClusterMetrics"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get metrics ({@link YarnClusterMetrics}) about the cluster.
+ </p>
+ 
+ @return cluster metrics
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getNodeReports" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="states" type="org.apache.hadoop.yarn.api.records.NodeState[]"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of nodes ({@link NodeReport}) in the cluster.
+ </p>
+ 
+ @param states The {@link NodeState}s to filter on. If no filter states are
+          given, nodes in all states will be returned.
+ @return A list of node reports
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRMDelegationToken" return="org.apache.hadoop.yarn.api.records.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="org.apache.hadoop.io.Text"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a delegation token so as to be able to talk to YARN using those tokens.
+ 
+ @param renewer
+          Address of the renewer who can renew these tokens when needed by
+          securely talking to YARN.
+ @return a delegation token ({@link Token}) that can be used to
+         talk to YARN
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueInfo" return="org.apache.hadoop.yarn.api.records.QueueInfo"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="queueName" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about a given <em>queue</em>.
+ </p>
+ 
+ @param queueName
+          Name of the queue whose information is needed
+ @return queue information
+ @throws YarnException
+           in case of errors or if YARN rejects the request due to
+           access-control restrictions.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getAllQueues" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about all queues, recursively if there
+ is a hierarchy
+ </p>
+ 
+ @return a list of queue-information for all queues
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRootQueueInfos" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about top level queues.
+ </p>
+ 
+ @return a list of queue-information for all the top-level queues
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getChildQueueInfos" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="parent" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information ({@link QueueInfo}) about all the immediate children queues
+ of the given queue
+ </p>
+ 
+ @param parent
+          Name of the queue whose child-queues' information is needed
+ @return a list of queue-information for all queues who are direct children
+         of the given parent queue.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getQueueAclsInfo" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get information about <em>acls</em> for <em>current user</em> on all the
+ existing queues.
+ </p>
+ 
+ @return a list of queue acls ({@link QueueUserACLInfo}) for
+         <em>current user</em>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptReport" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given ApplicationAttempt.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ @param applicationAttemptId
+          {@link ApplicationAttemptId} of the application attempt that needs
+          a report
+ @return application attempt report
+ @throws YarnException
+ @throws ApplicationAttemptNotFoundException if application attempt
+         not found
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttempts" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all ApplicationAttempts of an Application in the cluster.
+ </p>
+ 
+ @param applicationId application id of the app
+ @return a list of reports for all application attempts for specified
+         application.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerReport" return="org.apache.hadoop.yarn.api.records.ContainerReport"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of the given Container.
+ </p>
+ 
+ <p>
+ In secure mode, <code>YARN</code> verifies access to the application, queue
+ etc. before accepting the request.
+ </p>
+ 
+ @param containerId
+          {@link ContainerId} of the container that needs a report
+ @return container report
+ @throws YarnException
+ @throws ContainerNotFoundException if container not found.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainers" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a report of all Containers of an ApplicationAttempt in the cluster.
+ </p>
+ 
+ @param applicationAttemptId application attempt id
+ @return a list of reports of all containers for specified application
+         attempts
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="moveApplicationAcrossQueues"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="queue" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Attempts to move the given application to the given queue.
+ </p>
+ 
+ @param appId
+    Application to move.
+ @param queue
+    Queue to place it in to.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="createReservation" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewReservationResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Obtain a {@link GetNewReservationResponse} for a new reservation,
+ which contains the {@link ReservationId} object.
+ </p>
+
+ @return The {@link GetNewReservationResponse} containing a new
+         {@link ReservationId} object.
+ @throws YarnException if reservation cannot be created.
+ @throws IOException if reservation cannot be created.]]>
+      </doc>
+    </method>
+    <method name="submitReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to submit a new reservation to the
+ {@code ResourceManager}.
+ </p>
+ 
+ <p>
+ The client packages all details of its request in a
+ {@link ReservationSubmissionRequest} object. This contains information
+ about the amount of capacity, temporal constraints, and gang needs.
+ Furthermore, the reservation might be composed of multiple stages, with
+ ordering dependencies among them.
+ </p>
+ 
+ <p>
+ In order to respond, a new admission control component in the
+ {@code ResourceManager} performs an analysis of the resources that have
+ been committed over the period of time the user is requesting, verify that
+ the user requests can be fulfilled, and that it respect a sharing policy
+ (e.g., {@code CapacityOverTimePolicy}). Once it has positively determined
+ that the ReservationRequest is satisfiable the {@code ResourceManager}
+ answers with a {@link ReservationSubmissionResponse} that includes a
+ {@link ReservationId}. Upon failure to find a valid allocation the response
+ is an exception with the message detailing the reason of failure.
+ </p>
+ 
+ <p>
+ The semantics guarantees that the {@link ReservationId} returned,
+ corresponds to a valid reservation existing in the time-range request by
+ the user. The amount of capacity dedicated to such reservation can vary
+ overtime, depending of the allocation that has been determined. But it is
+ guaranteed to satisfy all the constraint expressed by the user in the
+ {@link ReservationDefinition}
+ </p>
+ 
+ @param request request to submit a new Reservation
+ @return response contains the {@link ReservationId} on accepting the
+         submission
+ @throws YarnException if the reservation cannot be created successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="updateReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to update an existing Reservation. This is
+ referred to as a re-negotiation process, in which a user updates a
+ Reservation that was previously submitted.
+ </p>
+ 
+ <p>
+ The allocation is attempted by virtually substituting all previous
+ allocations related to this Reservation with new ones, that satisfy the new
+ {@link ReservationDefinition}. Upon success the previous allocation is
+ atomically substituted by the new one, and on failure (i.e., if the system
+ cannot find a valid allocation for the updated request), the previous
+ allocation remains valid.
+ </p>
+ 
+ @param request to update an existing Reservation (the
+          {@link ReservationUpdateRequest} should refer to an existing valid
+          {@link ReservationId})
+ @return response empty on successfully updating the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           updated successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="deleteReservation" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to remove an existing Reservation.
+ </p>
+ 
+ @param request to remove an existing Reservation (the
+          {@link ReservationDeleteRequest} should refer to an existing valid
+          {@link ReservationId})
+ @return response empty on successfully deleting the existing reservation
+ @throws YarnException if the request is invalid or reservation cannot be
+           deleted successfully
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="listReservations" return="org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by clients to get the list of reservations in a plan.
+ The reservationId will be used to search for reservations to list if it is
+ provided. Otherwise, it will select active reservations within the
+ startTime and endTime (inclusive).
+ </p>
+
+ @param request to list reservations in a plan. Contains fields to select
+                String queue, ReservationId reservationId, long startTime,
+                long endTime, and a bool includeReservationAllocations.
+
+                queue: Required. Cannot be null or empty. Refers to the
+                reservable queue in the scheduler that was selected when
+                creating a reservation submission
+                {@link ReservationSubmissionRequest}.
+
+                reservationId: Optional. If provided, other fields will
+                be ignored.
+
+                startTime: Optional. If provided, only reservations that
+                end after the startTime will be selected. This defaults
+                to 0 if an invalid number is used.
+
+                endTime: Optional. If provided, only reservations that
+                start on or before endTime will be selected. This defaults
+                to Long.MAX_VALUE if an invalid number is used.
+
+                includeReservationAllocations: Optional. Flag that
+                determines whether the entire reservation allocations are
+                to be returned. Reservation allocations are subject to
+                change in the event of re-planning as described by
+                {@link ReservationDefinition}.
+
+ @return response that contains information about reservations that are
+                being searched for.
+ @throws YarnException if the request is invalid
+ @throws IOException if the request failed otherwise]]>
+      </doc>
+    </method>
+    <method name="getNodeToLabels" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get node to labels mappings in existing cluster
+ </p>
+ 
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getLabelsToNodes" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get labels to nodes mapping
+ in existing cluster
+ </p>
+
+ @return node to labels mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getLabelsToNodes" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="labels" type="java.util.Set"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get labels to nodes mapping
+ for specified labels in existing cluster
+ </p>
+
+ @param labels labels for which labels to nodes mapping has to be retrieved
+ @return labels to nodes mappings for specific labels
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeLabels" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get node labels in the cluster
+ </p>
+
+ @return cluster node labels collection
+ @throws YarnException when there is a failure in
+           {@link ApplicationClientProtocol}
+ @throws IOException when there is a failure in
+           {@link ApplicationClientProtocol}]]>
+      </doc>
+    </method>
+    <method name="updateApplicationPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to set priority of an application
+ </p>
+ @param applicationId
+ @param priority
+ @return updated priority of an application.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="signalToContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="command" type="org.apache.hadoop.yarn.api.records.SignalContainerCommand"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Signal a container identified by given ID.
+ </p>
+
+ @param containerId
+          {@link ContainerId} of the container that needs to be signaled
+ @param command the signal container command
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="updateApplicationTimeouts" return="org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getResourceProfiles" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get the resource profiles available in the RM.
+ </p>
+ @return a Map of the resource profile names to their capabilities
+ @throws YARNFeatureNotEnabledException if resource-profile is disabled
+ @throws YarnException if any error happens inside YARN
+ @throws IOException in case of other errors]]>
+      </doc>
+    </method>
+    <method name="getResourceProfile" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="profile" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get the details of a specific resource profile from the RM.
+ </p>
+ @param profile the profile name
+ @return resource profile name with its capabilities
+ @throws YARNFeatureNotEnabledException if resource-profile is disabled
+ @throws YarnException if any error happens inside YARN
+ @throws IOException in case of other others]]>
+      </doc>
+    </method>
+    <method name="getResourceTypeInfo" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ Get available resource types supported by RM.
+ </p>
+ @return list of supported resource types with detailed information
+ @throws YarnException if any issue happens inside YARN
+ @throws IOException in case of other others]]>
+      </doc>
+    </method>
+    <method name="getClusterAttributes" return="java.util.Set"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get node attributes in the cluster.
+ </p>
+
+ @return cluster node attributes collection
+ @throws YarnException when there is a failure in
+                       {@link ApplicationClientProtocol}
+ @throws IOException   when there is a failure in
+                       {@link ApplicationClientProtocol}]]>
+      </doc>
+    </method>
+    <method name="getAttributesToNodes" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="attributes" type="java.util.Set"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get mapping of AttributeKey to associated
+ NodeToAttributeValue list for specified node attributeKeys in the cluster.
+ </p>
+
+ @param attributes AttributeKeys for which associated NodeToAttributeValue
+          mapping value has to be retrieved. If empty or null is set then
+          will return mapping for all attributeKeys in the cluster
+ @return mapping of AttributeKey to List of associated
+         NodeToAttributeValue's.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getNodeToAttributes" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="hostNames" type="java.util.Set"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get all node to attribute mapping in
+ existing cluster.
+ </p>
+
+ @param hostNames HostNames for which host to attributes mapping has to
+                  be retrived.If empty or null is set then will return
+                  all nodes to attributes mapping in cluster.
+ @return Node to attribute mappings
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="shellToContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="command" type="org.apache.hadoop.yarn.api.records.ShellContainerCommand"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[<p>
+ The interface used by client to get a shell to a container.
+ </p>
+
+ @param containerId Container ID
+ @param command Shell type
+ @throws IOException if connection fails.]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.YarnClient -->
+  <!-- start class org.apache.hadoop.yarn.client.api.YarnClientApplication -->
+  <class name="YarnClientApplication" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="YarnClientApplication" type="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse, org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getNewApplicationResponse" return="org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmissionContext" return="org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.YarnClientApplication -->
+</package>
+<package name="org.apache.hadoop.yarn.client.api.async">
+  <!-- start class org.apache.hadoop.yarn.client.api.async.AMRMClientAsync -->
+  <class name="AMRMClientAsync" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMClientAsync" type="int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.AbstractCallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMClientAsync" type="org.apache.hadoop.yarn.client.api.AMRMClient, int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.AbstractCallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMClientAsync" type="int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMClientAsync" type="org.apache.hadoop.yarn.client.api.AMRMClient, int, org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.AbstractCallbackHandler"/>
+      <doc>
+      <![CDATA[<p>Create a new instance of AMRMClientAsync.</p>
+
+ @param intervalMs heartbeat interval in milliseconds between AM and RM
+ @param callbackHandler callback handler that processes responses from
+                        the <code>ResourceManager</code>]]>
+      </doc>
+    </method>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="client" type="org.apache.hadoop.yarn.client.api.AMRMClient"/>
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.AbstractCallbackHandler"/>
+      <doc>
+      <![CDATA[<p>Create a new instance of AMRMClientAsync.</p>
+
+ @param client the AMRMClient instance
+ @param intervalMs heartbeat interval in milliseconds between AM and RM
+ @param callbackHandler callback handler that processes responses from
+                        the <code>ResourceManager</code>]]>
+      </doc>
+    </method>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createAMRMClientAsync(int,
+             AMRMClientAsync.AbstractCallbackHandler)} instead.">
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"/>
+      <doc>
+      <![CDATA[@deprecated Use {@link #createAMRMClientAsync(int,
+             AMRMClientAsync.AbstractCallbackHandler)} instead.]]>
+      </doc>
+    </method>
+    <method name="createAMRMClientAsync" return="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createAMRMClientAsync(AMRMClient,
+             int, AMRMClientAsync.AbstractCallbackHandler)} instead.">
+      <param name="client" type="org.apache.hadoop.yarn.client.api.AMRMClient"/>
+      <param name="intervalMs" type="int"/>
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"/>
+      <doc>
+      <![CDATA[@deprecated Use {@link #createAMRMClientAsync(AMRMClient,
+             int, AMRMClientAsync.AbstractCallbackHandler)} instead.]]>
+      </doc>
+    </method>
+    <method name="setHeartbeatInterval"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="interval" type="int"/>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+    </method>
+    <method name="addSchedulingRequests"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="schedulingRequests" type="java.util.Collection"/>
+      <doc>
+      <![CDATA[Add a Collection of SchedulingRequests. The AMRMClient will ensure that
+ all requests in the same batch are sent in the same allocate call.
+ @param schedulingRequests Collection of Scheduling Requests.]]>
+      </doc>
+    </method>
+    <method name="getMatchingRequests" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="priority" type="org.apache.hadoop.yarn.api.records.Priority"/>
+      <param name="resourceName" type="java.lang.String"/>
+      <param name="executionType" type="org.apache.hadoop.yarn.api.records.ExecutionType"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Returns all matching ContainerRequests that match the given Priority,
+ ResourceName, ExecutionType and Capability.
+
+ NOTE: This matches only requests that were made by the client WITHOUT the
+ allocationRequestId specified.
+
+ @param priority Priority.
+ @param resourceName Location.
+ @param executionType ExecutionType.
+ @param capability Capability.
+ @return All matching ContainerRequests]]>
+      </doc>
+    </method>
+    <method name="getMatchingRequests" return="java.util.Collection"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="allocationRequestId" type="long"/>
+      <doc>
+      <![CDATA[Returns all matching ContainerRequests that match the given
+ AllocationRequestId.
+
+ NOTE: This matches only requests that were made by the client WITH the
+ allocationRequestId specified.
+
+ @param allocationRequestId AllocationRequestId.
+ @return All matching ContainerRequests]]>
+      </doc>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appHostName" type="java.lang.String"/>
+      <param name="appHostPort" type="int"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Registers this application master with the resource manager. On successful
+ registration, starts the heartbeating thread.
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appHostName" type="java.lang.String"/>
+      <param name="appHostPort" type="int"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <param name="placementConstraints" type="java.util.Map"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Register the application master. This must be called before any
+ other interaction
+ @param appHostName Name of the host on which master is running
+ @param appHostPort Port master is listening on
+ @param appTrackingUrl URL at which the master info can be seen
+ @param placementConstraints Placement Constraints mappings.
+ @return <code>RegisterApplicationMasterResponse</code>
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="unregisterApplicationMaster"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"/>
+      <param name="appMessage" type="java.lang.String"/>
+      <param name="appTrackingUrl" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unregister the application master. This must be called in the end.
+ @param appStatus Success/Failure status of the master
+ @param appMessage Diagnostics message on failure
+ @param appTrackingUrl New URL to get master info
+ @throws YarnException
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="addContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Request containers for resources before calling <code>allocate</code>
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="removeContainerRequest"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="req" type="T"/>
+      <doc>
+      <![CDATA[Remove previous container request. The previous container request may have 
+ already been sent to the ResourceManager. So even after the remove request 
+ the app must be prepared to receive an allocation for the previous request 
+ even after the remove request
+ @param req Resource request]]>
+      </doc>
+    </method>
+    <method name="requestContainerResourceChange"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="use
+ {@link #requestContainerUpdate(Container, UpdateContainerRequest)}">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <param name="capability" type="org.apache.hadoop.yarn.api.records.Resource"/>
+      <doc>
+      <![CDATA[Request container resource change before calling <code>allocate</code>.
+ Any previous pending resource change request of the same container will be
+ removed.
+
+ Application that calls this method is expected to maintain the
+ <code>Container</code>s that are returned from previous successful
+ allocations or resource changes. By passing in the existing container and a
+ target resource capability to this method, the application requests the
+ ResourceManager to change the existing resource allocation to the target
+ resource allocation.
+
+ @deprecated use
+ {@link #requestContainerUpdate(Container, UpdateContainerRequest)}
+
+ @param container The container returned from the last successful resource
+                  allocation or resource change
+ @param capability  The target resource capability of the container]]>
+      </doc>
+    </method>
+    <method name="requestContainerUpdate"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <param name="updateContainerRequest" type="org.apache.hadoop.yarn.api.records.UpdateContainerRequest"/>
+      <doc>
+      <![CDATA[Request a container update before calling <code>allocate</code>.
+ Any previous pending update request of the same container will be
+ removed.
+
+ @param container The container returned from the last successful resource
+                  allocation or update
+ @param updateContainerRequest The <code>UpdateContainerRequest</code>.]]>
+      </doc>
+    </method>
+    <method name="releaseAssignedContainer"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[Release containers assigned by the Resource Manager. If the app cannot use
+ the container or wants to give up the container then it can release them.
+ The app needs to make new requests for the released resource capability if
+ it still needs it. eg. it released non-local resources
+ @param containerId]]>
+      </doc>
+    </method>
+    <method name="getAvailableResources" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the currently available resources in the cluster.
+ A valid value is available after a call to allocate has been made
+ @return Currently available resources]]>
+      </doc>
+    </method>
+    <method name="getClusterNodeCount" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the current number of nodes in the cluster.
+ A valid values is available after a call to allocate has been made
+ @return Current number of nodes in the cluster]]>
+      </doc>
+    </method>
+    <method name="registerTimelineV2Client"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineClient" type="org.apache.hadoop.yarn.client.api.TimelineV2Client"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Register TimelineClient to AMRMClient.
+ @param timelineClient
+ @throws YarnException when this method is invoked even when ATS V2 is not
+           configured.]]>
+      </doc>
+    </method>
+    <method name="getRegisteredTimelineV2Client" return="org.apache.hadoop.yarn.client.api.TimelineV2Client"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get registered timeline client.
+ @return the registered timeline client]]>
+      </doc>
+    </method>
+    <method name="updateBlacklist"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="blacklistAdditions" type="java.util.List"/>
+      <param name="blacklistRemovals" type="java.util.List"/>
+      <doc>
+      <![CDATA[Update application's blacklist with addition or removal resources.
+
+ @param blacklistAdditions list of resources which should be added to the
+        application blacklist
+ @param blacklistRemovals list of resources which should be removed from the
+        application blacklist]]>
+      </doc>
+    </method>
+    <method name="updateTrackingUrl"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="trackingUrl" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Update application's tracking url on next heartbeat.
+
+ @param trackingUrl new tracking url for this application]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="java.util.function.Supplier"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true for each 1000 ms.
+ See also {@link #waitFor(java.util.function.Supplier, int)}
+ and {@link #waitFor(java.util.function.Supplier, int, int)}
+ @param check the condition for which it should wait]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="java.util.function.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true for each
+ <code>checkEveryMillis</code> ms.
+ See also {@link #waitFor(java.util.function.Supplier, int, int)}
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>]]>
+      </doc>
+    </method>
+    <method name="waitFor"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="check" type="java.util.function.Supplier"/>
+      <param name="checkEveryMillis" type="int"/>
+      <param name="logInterval" type="int"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Wait for <code>check</code> to return true for each
+ <code>checkEveryMillis</code> ms. In the main loop, this method will log
+ the message "waiting in main loop" for each <code>logInterval</code> times
+ iteration to confirm the thread is alive.
+ @param check user defined checker
+ @param checkEveryMillis interval to call <code>check</code>
+ @param logInterval interval to log for each]]>
+      </doc>
+    </method>
+    <field name="client" type="org.apache.hadoop.yarn.client.api.AMRMClient"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="handler" type="org.apache.hadoop.yarn.client.api.async.AMRMClientAsync.CallbackHandler"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="heartbeatIntervalMs" type="java.util.concurrent.atomic.AtomicInteger"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<code>AMRMClientAsync</code> handles communication with the ResourceManager
+ and provides asynchronous updates on events such as container allocations and
+ completions.  It contains a thread that sends periodic heartbeats to the
+ ResourceManager.
+ 
+ It should be used by implementing a CallbackHandler:
+ <pre>
+ {@code
+ class MyCallbackHandler extends AMRMClientAsync.AbstractCallbackHandler {
+   public void onContainersAllocated(List<Container> containers) {
+     [run tasks on the containers]
+   }
+
+   public void onContainersUpdated(List<Container> containers) {
+     [determine if resource allocation of containers have been increased in
+      the ResourceManager, and if so, inform the NodeManagers to increase the
+      resource monitor/enforcement on the containers]
+   }
+
+   public void onContainersCompleted(List<ContainerStatus> statuses) {
+     [update progress, check whether app is done]
+   }
+   
+   public void onNodesUpdated(List<NodeReport> updated) {}
+   
+   public void onReboot() {}
+ }
+ }
+ </pre>
+ 
+ The client's lifecycle should be managed similarly to the following:
+ 
+ <pre>
+ {@code
+ AMRMClientAsync asyncClient = 
+     createAMRMClientAsync(appAttId, 1000, new MyCallbackhandler());
+ asyncClient.init(conf);
+ asyncClient.start();
+ RegisterApplicationMasterResponse response = asyncClient
+    .registerApplicationMaster(appMasterHostname, appMasterRpcPort,
+       appMasterTrackingUrl);
+ asyncClient.addContainerRequest(containerRequest);
+ [... wait for application to complete]
+ asyncClient.unregisterApplicationMaster(status, appMsg, trackingUrl);
+ asyncClient.stop();
+ }
+ </pre>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.async.AMRMClientAsync -->
+  <!-- start class org.apache.hadoop.yarn.client.api.async.NMClientAsync -->
+  <class name="NMClientAsync" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMClientAsync" type="org.apache.hadoop.yarn.client.api.async.NMClientAsync.AbstractCallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="NMClientAsync" type="java.lang.String, org.apache.hadoop.yarn.client.api.async.NMClientAsync.AbstractCallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="NMClientAsync" type="java.lang.String, org.apache.hadoop.yarn.client.api.NMClient, org.apache.hadoop.yarn.client.api.async.NMClientAsync.AbstractCallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="NMClientAsync" type="org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="Use {@link #NMClientAsync(AbstractCallbackHandler)}
+             instead.">
+      <doc>
+      <![CDATA[@deprecated Use {@link #NMClientAsync(AbstractCallbackHandler)}
+             instead.]]>
+      </doc>
+    </constructor>
+    <constructor name="NMClientAsync" type="java.lang.String, org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="Use {@link #NMClientAsync(String, AbstractCallbackHandler)}
+             instead.">
+      <doc>
+      <![CDATA[@deprecated Use {@link #NMClientAsync(String, AbstractCallbackHandler)}
+             instead.]]>
+      </doc>
+    </constructor>
+    <constructor name="NMClientAsync" type="java.lang.String, org.apache.hadoop.yarn.client.api.NMClient, org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createNMClientAsync" return="org.apache.hadoop.yarn.client.api.async.NMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.NMClientAsync.AbstractCallbackHandler"/>
+    </method>
+    <method name="createNMClientAsync" return="org.apache.hadoop.yarn.client.api.async.NMClientAsync"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="Use {@link #createNMClientAsync(AbstractCallbackHandler)}
+             instead.">
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler"/>
+      <doc>
+      <![CDATA[@deprecated Use {@link #createNMClientAsync(AbstractCallbackHandler)}
+             instead.]]>
+      </doc>
+    </method>
+    <method name="startContainerAsync"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <param name="containerLaunchContext" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+    </method>
+    <method name="increaseContainerResourceAsync"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+    </method>
+    <method name="updateContainerResourceAsync"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="container" type="org.apache.hadoop.yarn.api.records.Container"/>
+      <doc>
+      <![CDATA[<p>Update the resources of a container.</p>
+
+ <p>The <code>ApplicationMaster</code> or other applications that use the
+ client must provide the details of the container, including the Id and
+ the target resource encapsulated in the updated container token via
+ {@link Container}.
+ </p>
+
+ @param container the container with updated token.]]>
+      </doc>
+    </method>
+    <method name="reInitializeContainerAsync"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="containerLaunchContex" type="org.apache.hadoop.yarn.api.records.ContainerLaunchContext"/>
+      <param name="autoCommit" type="boolean"/>
+      <doc>
+      <![CDATA[<p>Re-Initialize the Container.</p>
+
+ @param containerId the Id of the container to Re-Initialize.
+ @param containerLaunchContex the updated ContainerLaunchContext.
+ @param autoCommit commit re-initialization automatically ?]]>
+      </doc>
+    </method>
+    <method name="restartContainerAsync"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[<p>Restart the specified container.</p>
+
+ @param containerId the Id of the container to restart.]]>
+      </doc>
+    </method>
+    <method name="rollbackLastReInitializationAsync"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[<p>Rollback last reInitialization of the specified container.</p>
+
+ @param containerId the Id of the container to roll back.]]>
+      </doc>
+    </method>
+    <method name="commitLastReInitializationAsync"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <doc>
+      <![CDATA[<p>Commit last reInitialization of the specified container.</p>
+
+ @param containerId the Id of the container to commit reInitialize.]]>
+      </doc>
+    </method>
+    <method name="stopContainerAsync"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="getContainerStatusAsync"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+    </method>
+    <method name="getClient" return="org.apache.hadoop.yarn.client.api.NMClient"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setClient"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="client" type="org.apache.hadoop.yarn.client.api.NMClient"/>
+    </method>
+    <method name="getCallbackHandler" return="org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setCallbackHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler"/>
+    </method>
+    <field name="client" type="org.apache.hadoop.yarn.client.api.NMClient"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="callbackHandler" type="org.apache.hadoop.yarn.client.api.async.NMClientAsync.CallbackHandler"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[<code>NMClientAsync</code> handles communication with all the NodeManagers
+ and provides asynchronous updates on getting responses from them. It
+ maintains a thread pool to communicate with individual NMs where a number of
+ worker threads process requests to NMs by using {@link NMClientImpl}. The max
+ size of the thread pool is configurable through
+ {@link YarnConfiguration#NM_CLIENT_ASYNC_THREAD_POOL_MAX_SIZE}.
+
+ It should be used in conjunction with a CallbackHandler. For example
+
+ <pre>
+ {@code
+ class MyCallbackHandler extends NMClientAsync.AbstractCallbackHandler {
+   public void onContainerStarted(ContainerId containerId,
+       Map<String, ByteBuffer> allServiceResponse) {
+     [post process after the container is started, process the response]
+   }
+
+   public void onContainerResourceIncreased(ContainerId containerId,
+       Resource resource) {
+     [post process after the container resource is increased]
+   }
+
+   public void onContainerStatusReceived(ContainerId containerId,
+       ContainerStatus containerStatus) {
+     [make use of the status of the container]
+   }
+
+   public void onContainerStopped(ContainerId containerId) {
+     [post process after the container is stopped]
+   }
+
+   public void onStartContainerError(
+       ContainerId containerId, Throwable t) {
+     [handle the raised exception]
+   }
+
+   public void onGetContainerStatusError(
+       ContainerId containerId, Throwable t) {
+     [handle the raised exception]
+   }
+
+   public void onStopContainerError(
+       ContainerId containerId, Throwable t) {
+     [handle the raised exception]
+   }
+ }
+ }
+ </pre>
+
+ The client's life-cycle should be managed like the following:
+
+ <pre>
+ {@code
+ NMClientAsync asyncClient = 
+     NMClientAsync.createNMClientAsync(new MyCallbackhandler());
+ asyncClient.init(conf);
+ asyncClient.start();
+ asyncClient.startContainer(container, containerLaunchContext);
+ [... wait for container being started]
+ asyncClient.getContainerStatus(container.getId(), container.getNodeId(),
+     container.getContainerToken());
+ [... handle the status in the callback instance]
+ asyncClient.stopContainer(container.getId(), container.getNodeId(),
+     container.getContainerToken());
+ [... wait for container being stopped]
+ asyncClient.stop();
+ }
+ </pre>]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.async.NMClientAsync -->
+</package>
+<package name="org.apache.hadoop.yarn.client.api.async.impl">
+</package>
+<package name="org.apache.hadoop.yarn.client.api.impl">
+</package>
+<package name="org.apache.hadoop.yarn.client.cli">
+  <!-- start class org.apache.hadoop.yarn.client.cli.LogsCLI -->
+  <class name="LogsCLI" extends="org.apache.hadoop.conf.Configured"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.util.Tool"/>
+    <constructor name="LogsCLI"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="run" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="createYarnClient" return="org.apache.hadoop.yarn.client.api.YarnClient"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="getAMContainerInfoForRMWebService" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="appId" type="java.lang.String"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="getClientResponseFromTimelineReader" return="com.sun.jersey.api.client.ClientResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="appId" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getMatchedContainerLogFiles" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest"/>
+      <param name="useRegex" type="boolean"/>
+      <param name="ignoreSizeLimit" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getResponseFromNMWebService" return="com.sun.jersey.api.client.ClientResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="webServiceClient" type="com.sun.jersey.api.client.Client"/>
+      <param name="request" type="org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest"/>
+      <param name="logFile" type="java.lang.String"/>
+    </method>
+    <method name="getNodeHttpAddressFromRMWebString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest"/>
+      <exception name="ClientHandlerException" type="com.sun.jersey.api.client.ClientHandlerException"/>
+      <exception name="UniformInterfaceException" type="com.sun.jersey.api.client.UniformInterfaceException"/>
+      <exception name="JSONException" type="org.codehaus.jettison.json.JSONException"/>
+    </method>
+    <field name="HELP_CMD" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.cli.LogsCLI -->
+  <!-- start class org.apache.hadoop.yarn.client.cli.SchedConfCLI -->
+  <class name="SchedConfCLI" extends="org.apache.hadoop.conf.Configured"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.util.Tool"/>
+    <constructor name="SchedConfCLI"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="main"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="run" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="args" type="java.lang.String[]"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <doc>
+    <![CDATA[CLI for modifying scheduler configuration.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.cli.SchedConfCLI -->
+</package>
+<package name="org.apache.hadoop.yarn.client.util">
+</package>
+
+</api>
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.3.4.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.3.4.xml
new file mode 100644
index 0000000..3ec09cf
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.3.4.xml
@@ -0,0 +1,3975 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Fri Jul 29 14:09:19 GMT 2022 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop YARN Common 3.3.4"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/jdiff.jar -verbose -classpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/classes:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.3.4.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-protobuf_3_7/1.1.1/hadoop-shaded-protobuf_3_7-1.1.1.jar:/maven/com/google/guava/guava/27.0-jre/guava-27.0-jre.jar:/maven/com/google/guava/failureaccess/1.0/failureaccess-1.0.jar:/maven/com/google/guava/listenablefuture/9999.0-empty-to-avoid-conflict-with-guava/listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar:/maven/org/checkerframework/checker-qual/2.5.2/checker-qual-2.5.2.jar:/maven/com/google/j2objc/j2objc-annotations/1.1/j2objc-annotations-1.1.jar:/maven/org/codehaus/mojo/animal-sniffer-annotations/1.17/animal-sniffer-annotations-1.17.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/org/apache/httpcomponents/httpclient/4.5.13/httpclient-4.5.13.jar:/maven/org/apache/httpcomponents/httpcore/4.4.13/httpcore-4.4.13.jar:/maven/commons-net/commons-net/3.6/commons-net-3.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/jakarta/activation/jakarta.activation-api/1.2.1/jakarta.activation-api-1.2.1.jar:/maven/org/eclipse/jetty/jetty-server/9.4.43.v20210629/jetty-server-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-http/9.4.43.v20210629/jetty-http-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-io/9.4.43.v20210629/jetty-io-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-servlet/9.4.43.v20210629/jetty-servlet-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-security/9.4.43.v20210629/jetty-security-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/
jetty-util-ajax/9.4.43.v20210629/jetty-util-ajax-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-webapp/9.4.43.v20210629/jetty-webapp-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-xml/9.4.43.v20210629/jetty-xml-9.4.43.v20210629.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-beanutils/commons-beanutils/1.9.4/commons-beanutils-1.9.4.jar:/maven/org/apache/commons/commons-configuration2/2.1.1/commons-configuration2-2.1.1.jar:/maven/org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar:/maven/org/apache/commons/commons-text/1.4/commons-text-1.4.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/com/google/re2j/re2j/1.1/re2j-1.1.jar:/maven/com/google/code/gson/gson/2.8.9/gson-2.8.9.jar:/maven/com/jcraft/jsch/0.1.55/jsch-0.1.55.jar:/maven/org/apache/curator/curator-client/4.2.0/curator-client-4.2.0.jar:/maven/org/apache/curator/curator-recipes/4.2.0/curator-recipes-4.2.0.jar:/maven/com/google/code/findbugs/jsr305/3.0.2/jsr305-3.0.2.jar:/maven/org/apache/zookeeper/zookeeper/3.5.6/zookeeper-3.5.6.jar:/maven/org/apache/zookeeper/zookeeper-jute/3.5.6/zookeeper-jute-3.5.6.jar:/maven/org/apache/yetus/audience-annotations/0.5.0/audience-annotations-0.5.0.jar:/maven/org/apache/kerby/kerb-core/1.0.1/kerb-core-1.0.1.jar:/maven/org/apache/kerby/kerby-pkix/1.0.1/kerby-pkix-1.0.1.jar:/maven/org/apache/kerby/kerby-asn1/1.0.1/kerby-asn1-1.0.1.jar:/maven/org/apache/kerby/kerby-util/1.0.1/kerby-util-1.0.1.jar:/maven/org/codehaus/woodstox/stax2-api/4.2.1/stax2-api-4.2.1.jar:/maven/com/fasterxml/woodstox/woodstox-core/5.3.0/woodstox-core-5.3.0.jar:/maven/dnsjava/dnsjava/2.1.7/dnsjava-2.1.7.jar:/maven/org/xerial/snappy/snappy-java/1.1.8.2/snappy-java-1.1.8.2.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client
-3.3.4.jar:/maven/com/squareup/okhttp3/okhttp/4.9.3/okhttp-4.9.3.jar:/maven/com/squareup/okio/okio/2.8.0/okio-2.8.0.jar:/maven/org/jetbrains/kotlin/kotlin-stdlib/1.4.10/kotlin-stdlib-1.4.10.jar:/maven/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.10/kotlin-stdlib-common-1.4.10.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.12.7/jackson-annotations-2.12.7.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-3.3.4.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.3.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/9.8.1/nimbus-jose-jwt-9.8.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/2.4.7/json-smart-2.4.7.jar:/maven/net/minidev/accessors-smart/2.4.7/accessors-smart-2.4.7.jar:/maven/org/ow2/asm/asm/5.0.4/asm-5.0.4.jar:/maven/org/apache/curator/curator-framework/4.2.0/curator-framework-4.2.0.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.1/kerb-simplekdc-1.0.1.jar:/maven/org/apache/kerby/kerb-client/1.0.1/kerb-client-1.0.1.jar:/maven/org/apache/kerby/kerby-config/1.0.1/kerby-config-1.0.1.jar:/maven/org/apache/kerby/kerb-common/1.0.1/kerb-common-1.0.1.jar:/maven/org/apache/kerby/kerb-crypto/1.0.1/kerb-crypto-1.0.1.jar:/maven/org/apache/kerby/kerb-util/1.0.1/kerb-util-1.0.1.jar:/maven/org/apache/kerby/token-provider/1.0.1/token-provider-1.0.1.jar:/maven/org/apache/kerby/kerb-admin/1.0.1/kerb-admin-1.0.1.jar:/maven/org/apache/kerby/kerb-server/1.0.1/kerb-server-1.0.1.jar:/maven/org/apache/kerby/kerb-identity/1.0.1/kerb-identity-1.0.1.jar:/maven/org/apache/kerby/kerby-xdr/1.0.1/kerby-xdr-1.0.1.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/org/apache/commons/commons-compress/1.21/commons-compress-1.21.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/commons-codec/commons-codec/1.15/commons-codec-1.15.jar:/maven/org/eclipse/jetty/jetty-util/9.4.43.v20210629/jetty-util-9.4.43.v2021062
9.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-client/1.19/jersey-client-1.19.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-guava/1.1.1/hadoop-shaded-guava-1.1.1.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.3.4.jar:/usr/lib/jvm/java-8-openjdk-amd64/lib/tools.jar:/maven/com/google/inject/extensions/guice-servlet/4.0/guice-servlet-4.0.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/commons-io/commons-io/2.8.0/commons-io-2.8.0.jar:/maven/com/google/inject/guice/4.0/guice-4.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.19/jersey-guice-1.19.jar:/maven/ch/qos/reload4j/reload4j/1.2.22/reload4j-1.2.22.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.12.7/jackson-core-2.12.7.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.12.7/jackson-databind-2.12.7.jar:/maven/com/fasterxml/jackson/module/jackson-module-jaxb-annotations/2.12.7/jackson-module-jaxb-annotations-2.12.7.jar:/maven/jakarta/xml/bind/jakarta.xml.bind-api/2.3.2/jakarta.xml.bind-api-2.3.2.jar:/maven/com/fasterxml/jackson/jaxrs/jackson-jaxrs-json-provider/2
.12.7/jackson-jaxrs-json-provider-2.12.7.jar:/maven/com/fasterxml/jackson/jaxrs/jackson-jaxrs-base/2.12.7/jackson-jaxrs-base-2.12.7.jar:/maven/xerces/xercesImpl/2.12.2/xercesImpl-2.12.2.jar:/maven/xml-apis/xml-apis/1.4.01/xml-apis-1.4.01.jar -sourcepath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/jdiff.jar -apidir /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/site/jdiff/xml -apiname Apache Hadoop YARN Common 3.3.4 -->
+<package name="org.apache.hadoop.yarn">
+  <!-- start class org.apache.hadoop.yarn.ContainerLogAppender -->
+  <class name="ContainerLogAppender" extends="org.apache.log4j.FileAppender"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="ContainerLogAppender"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="activateOptions"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="append"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.log4j.spi.LoggingEvent"/>
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerLogDir" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Getter/Setter methods for log4j.]]>
+      </doc>
+    </method>
+    <method name="setContainerLogDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogDir" type="java.lang.String"/>
+    </method>
+    <method name="getContainerLogFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContainerLogFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogFile" type="java.lang.String"/>
+    </method>
+    <method name="getTotalLogFileSize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setTotalLogFileSize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="logSize" type="long"/>
+      <doc>
+      <![CDATA[Setter so that log4j can configure it from the
+  configuration(log4j.properties).]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A simple log4j-appender for container's logs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.ContainerLogAppender -->
+  <!-- start class org.apache.hadoop.yarn.ContainerRollingLogAppender -->
+  <class name="ContainerRollingLogAppender" extends="org.apache.log4j.RollingFileAppender"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="ContainerRollingLogAppender"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="activateOptions"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="flush"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerLogDir" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Getter/Setter methods for log4j.]]>
+      </doc>
+    </method>
+    <method name="setContainerLogDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogDir" type="java.lang.String"/>
+    </method>
+    <method name="getContainerLogFile" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setContainerLogFile"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerLogFile" type="java.lang.String"/>
+    </method>
+    <doc>
+    <![CDATA[A simple log4j-appender for container's logs.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.ContainerRollingLogAppender -->
+  <!-- start class org.apache.hadoop.yarn.YarnUncaughtExceptionHandler -->
+  <class name="YarnUncaughtExceptionHandler" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.lang.Thread.UncaughtExceptionHandler"/>
+    <constructor name="YarnUncaughtExceptionHandler"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="uncaughtException"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="t" type="java.lang.Thread"/>
+      <param name="e" type="java.lang.Throwable"/>
+    </method>
+    <doc>
+    <![CDATA[This class is intended to be installed by calling 
+ {@link Thread#setDefaultUncaughtExceptionHandler(UncaughtExceptionHandler)}
+ In the main entry point.  It is intended to try and cleanly shut down
+ programs using the YARN Event framework.
+ 
+ Note: Right now it only will shut down the program if a Error is caught, but
+ not any other exception.  Anything else is just logged.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.YarnUncaughtExceptionHandler -->
+</package>
+<package name="org.apache.hadoop.yarn.api">
+</package>
+<package name="org.apache.hadoop.yarn.client">
+  <!-- start class org.apache.hadoop.yarn.client.AHSProxy -->
+  <class name="AHSProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AHSProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAHSProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="ahsAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="rmAddress" type="java.net.InetSocketAddress"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.AHSProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.ClientRMProxy -->
+  <class name="ClientRMProxy" extends="org.apache.hadoop.yarn.client.RMProxy"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="createRMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="configuration" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a proxy to the ResourceManager for the specified protocol.
+ @param configuration Configuration with all the required information.
+ @param protocol Client protocol for which proxy is being requested.
+ @param <T> Type of proxy.
+ @return Proxy to the ResourceManager for the specified client protocol.
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getRMDelegationTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get the token service name to be used for RMDelegationToken. Depending
+ on whether HA is enabled or not, this method generates the appropriate
+ service name as a comma-separated list of service addresses.
+
+ @param conf Configuration corresponding to the cluster we need the
+             RMDelegationToken for
+ @return - Service name for RMDelegationToken]]>
+      </doc>
+    </method>
+    <method name="getAMRMTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="address" type="java.lang.String"/>
+      <param name="defaultAddr" type="java.lang.String"/>
+      <param name="defaultPort" type="int"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.ClientRMProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.NMProxy -->
+  <class name="NMProxy" extends="org.apache.hadoop.yarn.client.ServerProxy"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createNMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="rpc" type="org.apache.hadoop.yarn.ipc.YarnRPC"/>
+      <param name="serverAddress" type="java.net.InetSocketAddress"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.NMProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.RMProxy -->
+  <class name="RMProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMProxy"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRetryPolicy" return="org.apache.hadoop.io.retry.RetryPolicy"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="retryTime" type="long"/>
+      <param name="retryInterval" type="long"/>
+      <param name="isHAEnabled" type="boolean"/>
+      <doc>
+      <![CDATA[Fetch retry policy from Configuration and create the
+ retry policy with specified retryTime and retry interval.]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.RMProxy -->
+  <!-- start class org.apache.hadoop.yarn.client.ServerProxy -->
+  <class name="ServerProxy" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ServerProxy"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createRetryPolicy" return="org.apache.hadoop.io.retry.RetryPolicy"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="maxWaitTimeStr" type="java.lang.String"/>
+      <param name="defMaxWaitTime" type="long"/>
+      <param name="connectRetryIntervalStr" type="java.lang.String"/>
+      <param name="defRetryInterval" type="long"/>
+    </method>
+    <method name="createRetriableProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="user" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="rpc" type="org.apache.hadoop.yarn.ipc.YarnRPC"/>
+      <param name="serverAddress" type="java.net.InetSocketAddress"/>
+      <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.ServerProxy -->
+</package>
+<package name="org.apache.hadoop.yarn.client.api">
+  <!-- start class org.apache.hadoop.yarn.client.api.AppAdminClient -->
+  <class name="AppAdminClient" extends="org.apache.hadoop.service.CompositeService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppAdminClient"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createAppAdminClient" return="org.apache.hadoop.yarn.client.api.AppAdminClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appType" type="java.lang.String"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[<p>
+ Create a new instance of AppAdminClient.
+ </p>
+
+ @param appType application type
+ @param conf configuration
+ @return app admin client]]>
+      </doc>
+    </method>
+    <method name="actionLaunch" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fileName" type="java.lang.String"/>
+      <param name="appName" type="java.lang.String"/>
+      <param name="lifetime" type="java.lang.Long"/>
+      <param name="queue" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Launch a new YARN application.
+ </p>
+
+ @param fileName specification of application
+ @param appName name of the application
+ @param lifetime lifetime of the application
+ @param queue queue of the application
+ @return exit code
+ @throws IOException IOException
+ @throws YarnException exception in client or server]]>
+      </doc>
+    </method>
+    <method name="actionStop" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Stop a YARN application (attempt to stop gracefully before killing the
+ application). In the case of a long-running service, the service may be
+ restarted later.
+ </p>
+
+ @param appName the name of the application
+ @return exit code
+ @throws IOException IOException
+ @throws YarnException exception in client or server]]>
+      </doc>
+    </method>
+    <method name="actionStart" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Start a YARN application from a previously saved specification. In the
+ case of a long-running service, the service must have been previously
+ launched/started and then stopped, or previously saved but not started.
+ </p>
+
+ @param appName the name of the application
+ @return exit code
+ @throws IOException IOException
+ @throws YarnException exception in client or server]]>
+      </doc>
+    </method>
+    <method name="actionSave" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="fileName" type="java.lang.String"/>
+      <param name="appName" type="java.lang.String"/>
+      <param name="lifetime" type="java.lang.Long"/>
+      <param name="queue" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Save the specification for a YARN application / long-running service.
+ The application may be started later.
+ </p>
+
+ @param fileName specification of application to save
+ @param appName name of the application
+ @param lifetime lifetime of the application
+ @param queue queue of the application
+ @return exit code
+ @throws IOException IOException
+ @throws YarnException exception in client or server]]>
+      </doc>
+    </method>
+    <method name="actionDestroy" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Remove the specification and all application data for a YARN application.
+ The application cannot be running.
+ </p>
+
+ @param appName the name of the application
+ @return exit code
+ @throws IOException IOException
+ @throws YarnException exception in client or server]]>
+      </doc>
+    </method>
+    <method name="actionFlex" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <param name="componentCounts" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Change the number of running containers for a component of a YARN
+ application / long-running service.
+ </p>
+
+ @param appName the name of the application
+ @param componentCounts map of component name to new component count or
+                        amount to change existing component count (e.g.
+                        5, +5, -5)
+ @return exit code
+ @throws IOException IOException
+ @throws YarnException exception in client or server]]>
+      </doc>
+    </method>
+    <method name="enableFastLaunch" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="destinationFolder" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Upload AM dependencies to HDFS. This makes future application launches
+ faster since the dependencies do not have to be uploaded on each launch.
+ </p>
+
+ @param destinationFolder
+          an optional HDFS folder where dependency tarball will be uploaded
+ @return exit code
+ @throws IOException
+           IOException
+ @throws YarnException
+           exception in client or server]]>
+      </doc>
+    </method>
+    <method name="getStatusString" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appIdOrName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Get detailed app specific status string for a YARN application.
+ </p>
+
+ @param appIdOrName appId or appName
+ @return status string
+ @throws IOException IOException
+ @throws YarnException exception in client or server]]>
+      </doc>
+    </method>
+    <method name="initiateUpgrade" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <param name="fileName" type="java.lang.String"/>
+      <param name="autoFinalize" type="boolean"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Initiate upgrade of a long running service.
+
+ @param appName      the name of the application.
+ @param fileName     specification of application upgrade to save.
+ @param autoFinalize when true, finalization of upgrade will be done
+                     automatically.
+ @return exit code
+ @throws IOException   IOException
+ @throws YarnException exception in client or server]]>
+      </doc>
+    </method>
+    <method name="actionUpgradeInstances" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <param name="componentInstances" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Upgrade component instances of a long running service.
+
+ @param appName            the name of the application.
+ @param componentInstances the name of the component instances.]]>
+      </doc>
+    </method>
+    <method name="actionUpgradeComponents" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <param name="components" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Upgrade components of a long running service.
+
+ @param appName    the name of the application.
+ @param components the name of the components.]]>
+      </doc>
+    </method>
+    <method name="actionCleanUp" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <param name="userName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="InterruptedException" type="java.lang.InterruptedException"/>
+      <doc>
+      <![CDATA[Operation to be performed by the RM after an application has completed.
+
+ @param appName  the name of the application.
+ @param userName the name of the user.
+ @return exit code]]>
+      </doc>
+    </method>
+    <method name="getInstances" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <param name="components" type="java.util.List"/>
+      <param name="version" type="java.lang.String"/>
+      <param name="containerStates" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+    </method>
+    <method name="actionUpgradeExpress" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <param name="fileName" type="java.io.File"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Express upgrade a long running service.
+
+ @param appName  the name of the application
+ @param fileName specification of application upgrade to save.
+ @return exit code]]>
+      </doc>
+    </method>
+    <method name="actionCancelUpgrade" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Cancels the upgrade of the service.
+
+ @param appName the name of the application
+ @return exit code
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="actionDecommissionInstances" return="int"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <param name="componentInstances" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Decommission component instances of a long running service.
+
+ @param appName            the name of the application.
+ @param componentInstances the name of the component instances.]]>
+      </doc>
+    </method>
+    <field name="YARN_APP_ADMIN_CLIENT_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_CLASS_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="UNIT_TEST_TYPE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="UNIT_TEST_CLASS_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Client for managing applications.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.AppAdminClient -->
+  <!-- start class org.apache.hadoop.yarn.client.api.TimelineClient -->
+  <class name="TimelineClient" extends="org.apache.hadoop.service.CompositeService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.io.Flushable"/>
+    <constructor name="TimelineClient" type="java.lang.String"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createTimelineClient" return="org.apache.hadoop.yarn.client.api.TimelineClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Creates an instance of the timeline v.1.x client.
+ The current UGI when the user initialize the client will be used to do the
+ put and the delegation token operations. The current user may use
+ {@link UserGroupInformation#doAs} another user to construct and initialize
+ a timeline client if the following operations are supposed to be conducted
+ by that user.
+
+ @return the created timeline client instance]]>
+      </doc>
+    </method>
+    <method name="putEntities" return="org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="entities" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntity[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a number of conceptual entities to the timeline
+ server. It is a blocking API. The method will not return until it gets the
+ response from the timeline server.
+ </p>
+ 
+ @param entities
+          the collection of {@link TimelineEntity}
+ @return the error information if the sent entities are not correctly stored
+ @throws IOException if there are I/O errors
+ @throws YarnException if entities are incomplete/invalid]]>
+      </doc>
+    </method>
+    <method name="putEntities" return="org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="groupId" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId"/>
+      <param name="entities" type="org.apache.hadoop.yarn.api.records.timeline.TimelineEntity[]"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a number of conceptual entities to the timeline
+ server. It is a blocking API. The method will not return until it gets the
+ response from the timeline server.
+
+ This API is only for timeline service v1.5
+ </p>
+
+ @param appAttemptId {@link ApplicationAttemptId}
+ @param groupId {@link TimelineEntityGroupId}
+ @param entities
+          the collection of {@link TimelineEntity}
+ @return the error information if the sent entities are not correctly stored
+ @throws IOException if there are I/O errors
+ @throws YarnException if entities are incomplete/invalid]]>
+      </doc>
+    </method>
+    <method name="putDomain"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="domain" type="org.apache.hadoop.yarn.api.records.timeline.TimelineDomain"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a domain to the timeline server. It is a
+ blocking API. The method will not return until it gets the response from
+ the timeline server.
+ </p>
+ 
+ @param domain
+          an {@link TimelineDomain} object
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="putDomain"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="domain" type="org.apache.hadoop.yarn.api.records.timeline.TimelineDomain"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Send the information of a domain to the timeline server. It is a
+ blocking API. The method will not return until it gets the response from
+ the timeline server.
+
+ This API is only for timeline service v1.5
+ </p>
+
+ @param domain
+          an {@link TimelineDomain} object
+ @param appAttemptId {@link ApplicationAttemptId}
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="getDelegationToken" return="org.apache.hadoop.security.token.Token"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="renewer" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Get a delegation token so as to be able to talk to the timeline server in a
+ secure way.
+ </p>
+ 
+ @param renewer
+          Address of the renewer who can renew these tokens when needed by
+          securely talking to the timeline server
+ @return a delegation token ({@link Token}) that can be used to talk to the
+         timeline server
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="renewDelegationToken" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineDT" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Renew a timeline delegation token.
+ </p>
+ 
+ @param timelineDT
+          the delegation token to renew
+ @return the new expiration time
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <method name="cancelDelegationToken"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="timelineDT" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[<p>
+ Cancel a timeline delegation token.
+ </p>
+ 
+ @param timelineDT
+          the delegation token to cancel
+ @throws IOException
+ @throws YarnException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A client library that can be used to post some information in terms of a
+ number of conceptual entities. This client library needs to be used along
+ with Timeline V.1.x server versions.
+ Refer {@link TimelineV2Client} for ATS V2 interface.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.TimelineClient -->
+  <!-- start class org.apache.hadoop.yarn.client.api.TimelineReaderClient -->
+  <class name="TimelineReaderClient" extends="org.apache.hadoop.service.CompositeService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineReaderClient" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="createTimelineReaderClient" return="org.apache.hadoop.yarn.client.api.TimelineReaderClient"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new instance of Timeline Reader Client.]]>
+      </doc>
+    </method>
+    <method name="getApplicationEntity" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="fields" type="java.lang.String"/>
+      <param name="filters" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets application entity.
+ @param appId application id
+ @param fields Fields to be fetched. Defaults to INFO.
+ @param filters Filters to be applied while fetching entities.
+ @return entity of the application
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptEntity" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="fields" type="java.lang.String"/>
+      <param name="filters" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets application attempt entity.
+ @param appAttemptId application attempt id
+ @param fields Fields to be fetched. Defaults to INFO.
+ @param filters Filters to be applied while fetching entities.
+ @return entity associated with application attempt
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getApplicationAttemptEntities" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="fields" type="java.lang.String"/>
+      <param name="filters" type="java.util.Map"/>
+      <param name="limit" type="long"/>
+      <param name="fromId" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets application attempt entities.
+ @param appId application id
+ @param fields Fields to be fetched. Defaults to INFO.
+ @param filters Filters to be applied while fetching entities.
+ @param limit Number of entities to return.
+ @param fromId Retrieve next set of generic ids from given fromId
+ @return list of application attempt entities
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerEntity" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="fields" type="java.lang.String"/>
+      <param name="filters" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets Timeline entity for the container.
+ @param containerId container id
+ @param fields Fields to be fetched. Defaults to INFO.
+ @param filters Filters to be applied while fetching entities.
+ @return timeline entity for container
+ @throws IOException]]>
+      </doc>
+    </method>
+    <method name="getContainerEntities" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="fields" type="java.lang.String"/>
+      <param name="filters" type="java.util.Map"/>
+      <param name="limit" type="long"/>
+      <param name="fromId" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Gets container entities for an application.
+ @param appId application id
+ @param fields Fields to be fetched. Defaults to INFO.
+ @param filters Filters to be applied while fetching entities.
+ @param limit Number of entities to return.
+ @param fromId Retrieve next set of generic ids from given fromId
+ @return list of entities
+ @throws IOException]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A client library that can be used to get Timeline Entities associated with
+ application, application attempt or containers. This client library needs to
+ be used along with time line v.2 server version.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.TimelineReaderClient -->
+</package>
+<package name="org.apache.hadoop.yarn.client.api.impl">
+  <!-- start class org.apache.hadoop.yarn.client.api.impl.TimelineReaderClientImpl -->
+  <class name="TimelineReaderClientImpl" extends="org.apache.hadoop.yarn.client.api.TimelineReaderClient"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineReaderClientImpl"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="serviceInit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="getApplicationEntity" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="fields" type="java.lang.String"/>
+      <param name="filters" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getApplicationAttemptEntity" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="fields" type="java.lang.String"/>
+      <param name="filters" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getApplicationAttemptEntities" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="fields" type="java.lang.String"/>
+      <param name="filters" type="java.util.Map"/>
+      <param name="limit" type="long"/>
+      <param name="fromId" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getContainerEntity" return="org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+      <param name="fields" type="java.lang.String"/>
+      <param name="filters" type="java.util.Map"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getContainerEntities" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="fields" type="java.lang.String"/>
+      <param name="filters" type="java.util.Map"/>
+      <param name="limit" type="long"/>
+      <param name="fromId" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="doGetUri" return="com.sun.jersey.api.client.ClientResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="base" type="java.net.URI"/>
+      <param name="path" type="java.lang.String"/>
+      <param name="params" type="javax.ws.rs.core.MultivaluedMap"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <doc>
+    <![CDATA[Implementation of TimelineReaderClient interface.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.client.api.impl.TimelineReaderClientImpl -->
+</package>
+<package name="org.apache.hadoop.yarn.event">
+  <!-- start class org.apache.hadoop.yarn.event.AbstractEvent -->
+  <class name="AbstractEvent" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.event.Event"/>
+    <constructor name="AbstractEvent" type="TYPE"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AbstractEvent" type="TYPE, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTimestamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getType" return="TYPE"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Parent class of all the events. All events extend this class.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.event.AbstractEvent -->
+  <!-- start class org.apache.hadoop.yarn.event.AsyncDispatcher -->
+  <class name="AsyncDispatcher" extends="org.apache.hadoop.service.AbstractService"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.event.Dispatcher"/>
+    <constructor name="AsyncDispatcher"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AsyncDispatcher" type="java.util.concurrent.BlockingQueue"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AsyncDispatcher" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Set a name for this dispatcher thread.
+ @param dispatcherName name of the dispatcher thread]]>
+      </doc>
+    </constructor>
+    <method name="disableExitOnDispatchException"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="serviceInit"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="serviceStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="setDrainEventsOnStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="serviceStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="dispatch"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="event" type="org.apache.hadoop.yarn.event.Event"/>
+    </method>
+    <method name="register"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventType" type="java.lang.Class"/>
+      <param name="handler" type="org.apache.hadoop.yarn.event.EventHandler"/>
+    </method>
+    <method name="getEventHandler" return="org.apache.hadoop.yarn.event.EventHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isEventThreadWaiting" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="isDrained" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="isStopped" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="addMetrics"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="metrics" type="org.apache.hadoop.yarn.metrics.EventTypeMetrics"/>
+      <param name="eventClass" type="java.lang.Class"/>
+    </method>
+    <field name="eventDispatchers" type="java.util.Map"
+      transient="false" volatile="false"
+      static="false" final="true" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Dispatches {@link Event}s in a separate thread. Currently only single thread
+ does that. Potentially there could be multiple channels for each event type
+ class and a thread pool can be used to dispatch the events.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.event.AsyncDispatcher -->
+  <!-- start interface org.apache.hadoop.yarn.event.Dispatcher -->
+  <interface name="Dispatcher"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getEventHandler" return="org.apache.hadoop.yarn.event.EventHandler"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="register"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventType" type="java.lang.Class"/>
+      <param name="handler" type="org.apache.hadoop.yarn.event.EventHandler"/>
+    </method>
+    <doc>
+    <![CDATA[Event Dispatcher interface. It dispatches events to registered 
+ event handlers based on event types.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.Dispatcher -->
+  <!-- start interface org.apache.hadoop.yarn.event.Event -->
+  <interface name="Event"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getType" return="TYPE"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTimestamp" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Interface defining events api.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.Event -->
+  <!-- start interface org.apache.hadoop.yarn.event.EventHandler -->
+  <interface name="EventHandler"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="handle"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="event" type="T"/>
+    </method>
+    <doc>
+    <![CDATA[Interface for handling events of type T
+
+ @param <T> parameterized event of type T]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.event.EventHandler -->
+</package>
+<package name="org.apache.hadoop.yarn.factories">
+</package>
+<package name="org.apache.hadoop.yarn.factory.providers">
+</package>
+<package name="org.apache.hadoop.yarn.logaggregation">
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat -->
+  <class name="AggregatedLogFormat" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AggregatedLogFormat"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat -->
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey -->
+  <class name="AggregatedLogFormat.LogKey" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.io.Writable"/>
+    <constructor name="LogKey"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="LogKey" type="org.apache.hadoop.yarn.api.records.ContainerId"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="LogKey" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="obj" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey -->
+  <!-- start class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader -->
+  <class name="AggregatedLogFormat.LogReader" extends="java.lang.Object"
+    abstract="false"
+    static="true" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="LogReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getApplicationOwner" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the owner of the application.
+
+ @return the application owner.
+ @throws IOException if we can not get the application owner.]]>
+      </doc>
+    </method>
+    <method name="getApplicationAcls" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns ACLs for the application. An empty map is returned if no ACLs are
+ found.
+
+ @return a map of the Application ACLs.
+ @throws IOException if we can not get the application acls.]]>
+      </doc>
+    </method>
+    <method name="next" return="java.io.DataInputStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Read the next key and return the value-stream.
+ 
+ @param key the log key
+ @return the valueStream if there are more keys or null otherwise
+ @throws IOException if we can not get the dataInputStream
+ for the next key]]>
+      </doc>
+    </method>
+    <method name="readAcontainerLogs"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="writer" type="java.io.Writer"/>
+      <param name="logUploadedTime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all logs for a single container to the provided writer.
+ @param valueStream the valueStream
+ @param writer the log writer
+ @param logUploadedTime the time stamp
+ @throws IOException if we can not read the container logs.]]>
+      </doc>
+    </method>
+    <method name="readAcontainerLogs"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="writer" type="java.io.Writer"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Writes all logs for a single container to the provided writer.
+ @param valueStream the value stream
+ @param writer the log writer
+ @throws IOException if we can not read the container logs.]]>
+      </doc>
+    </method>
+    <method name="readAContainerLogsForALogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <param name="logUploadedTime" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this till you get a {@link EOFException} for getting logs of
+ all types for a single container.
+ 
+ @param valueStream the value stream
+ @param out the print stream
+ @param logUploadedTime the time stamp
+ @throws IOException if we can not read the container log by specifying
+ the container log type.]]>
+      </doc>
+    </method>
+    <method name="readAContainerLogsForALogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <param name="logUploadedTime" type="long"/>
+      <param name="bytes" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this till you get a {@link EOFException} for getting logs of
+ all types for a single container for the specific bytes.
+
+ @param valueStream the value stream
+ @param out the output print stream
+ @param logUploadedTime the log upload time stamp
+ @param bytes the output size of the log
+ @throws IOException if we can not read the container log]]>
+      </doc>
+    </method>
+    <method name="readAContainerLogsForALogType"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this till you get a {@link EOFException} for getting logs of
+ all types for a single container.
+ 
+ @param valueStream the value stream
+ @param out the output print stream
+ @throws IOException if we can not read the container log]]>
+      </doc>
+    </method>
+    <method name="readContainerLogsForALogType" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <param name="logUploadedTime" type="long"/>
+      <param name="logType" type="java.util.List"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this till you get a {@link EOFException} for getting logs of
+ the specific types for a single container.
+ @param valueStream the value stream
+ @param out the output print stream
+ @param logUploadedTime the log uploaded time stamp
+ @param logType the given log type
+ @throws IOException if we can not read the container logs]]>
+      </doc>
+    </method>
+    <method name="readContainerLogsForALogType" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="valueStream" type="java.io.DataInputStream"/>
+      <param name="out" type="java.io.PrintStream"/>
+      <param name="logUploadedTime" type="long"/>
+      <param name="logType" type="java.util.List"/>
+      <param name="bytes" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Keep calling this till you get a {@link EOFException} for getting logs of
+ the specific types for a single container.
+ @param valueStream the value stream
+ @param out the output print stream
+ @param logUploadedTime the log uploaded time stamp
+ @param logType the given log type
+ @throws IOException if we can not read the container logs]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader -->
+</package>
+<package name="org.apache.hadoop.yarn.logaggregation.filecontroller">
+  <!-- start class org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController -->
+  <class name="LogAggregationFileController" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="LogAggregationFileController"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="controllerName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Initialize the log file controller.
+ @param conf the Configuration
+ @param controllerName the log controller class name]]>
+      </doc>
+    </method>
+    <method name="initInternal"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Derived classes initialize themselves using this method.
+ @param conf the Configuration]]>
+      </doc>
+    </method>
+    <method name="getRemoteRootLogDir" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the remote root log directory.
+ @return the remote root log directory path]]>
+      </doc>
+    </method>
+    <method name="getRemoteRootLogDirSuffix" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the log aggregation directory suffix.
+ @return the log aggregation directory suffix]]>
+      </doc>
+    </method>
+    <method name="initializeWriter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="context" type="org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Initialize the writer.
+ @param context the {@link LogAggregationFileControllerContext}
+ @throws IOException if fails to initialize the writer]]>
+      </doc>
+    </method>
+    <method name="closeWriter"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="LogAggregationDFSException" type="org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationDFSException"/>
+      <doc>
+      <![CDATA[Close the writer.
+ @throws LogAggregationDFSException if the closing of the writer fails
+         (for example due to HDFS quota being exceeded)]]>
+      </doc>
+    </method>
+    <method name="write"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="logKey" type="org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey"/>
+      <param name="logValue" type="org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogValue"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Write the log content.
+ @param logKey the log key
+ @param logValue the log content
+ @throws IOException if fails to write the logs]]>
+      </doc>
+    </method>
+    <method name="postWrite"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="record" type="org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerContext"/>
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[Operations needed after write the log content.
+ @param record the {@link LogAggregationFileControllerContext}
+ @throws Exception if anything fails]]>
+      </doc>
+    </method>
+    <method name="closePrintStream"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.OutputStream"/>
+    </method>
+    <method name="readAggregatedLogs" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="logRequest" type="org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest"/>
+      <param name="os" type="java.io.OutputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Output container log.
+ @param logRequest {@link ContainerLogsRequest}
+ @param os the output stream
+ @return true if we can read the aggregated logs successfully
+ @throws IOException if we can not access the log file.]]>
+      </doc>
+    </method>
+    <method name="readAggregatedLogsMeta" return="java.util.List"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="logRequest" type="org.apache.hadoop.yarn.logaggregation.ContainerLogsRequest"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Return a list of {@link ContainerLogMeta} for an application
+ from Remote FileSystem.
+
+ @param logRequest {@link ContainerLogsRequest}
+ @return a list of {@link ContainerLogMeta}
+ @throws IOException if there is no available log file]]>
+      </doc>
+    </method>
+    <method name="renderAggregatedLogsBlock"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="html" type="org.apache.hadoop.yarn.webapp.view.HtmlBlock.Block"/>
+      <param name="context" type="org.apache.hadoop.yarn.webapp.View.ViewContext"/>
+      <doc>
+      <![CDATA[Render Aggregated Logs block.
+ @param html the html
+ @param context the ViewContext]]>
+      </doc>
+    </method>
+    <method name="getApplicationOwner" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="aggregatedLogPath" type="org.apache.hadoop.fs.Path"/>
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns the owner of the application.
+
+ @param aggregatedLogPath the aggregatedLog path
+ @param appId the ApplicationId
+ @return the application owner
+ @throws IOException if we can not get the application owner]]>
+      </doc>
+    </method>
+    <method name="getApplicationAcls" return="java.util.Map"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="aggregatedLogPath" type="org.apache.hadoop.fs.Path"/>
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns ACLs for the application. An empty map is returned if no ACLs are
+ found.
+
+ @param aggregatedLogPath the aggregatedLog path.
+ @param appId the ApplicationId
+ @return a map of the Application ACLs.
+ @throws IOException if we can not get the application acls]]>
+      </doc>
+    </method>
+    <method name="verifyAndCreateRemoteLogDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Verify and create the remote log directory.]]>
+      </doc>
+    </method>
+    <method name="createAppDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="user" type="java.lang.String"/>
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="userUgi" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <doc>
+      <![CDATA[Create remote Application directory for log aggregation.
+ @param user the user
+ @param appId the application ID
+ @param userUgi the UGI]]>
+      </doc>
+    </method>
+    <method name="getFileSystem" return="org.apache.hadoop.fs.FileSystem"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="createDir"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="fsPerm" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="checkExists" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="fsPerm" type="org.apache.hadoop.fs.permission.FsPermission"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getRemoteNodeLogFileForApp" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="user" type="java.lang.String"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <doc>
+      <![CDATA[Get the remote aggregated log path.
+ @param appId the ApplicationId
+ @param user the Application Owner
+ @param nodeId the NodeManager Id
+ @return the remote aggregated log path]]>
+      </doc>
+    </method>
+    <method name="getRemoteAppLogDir" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="appOwner" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the remote application directory for log aggregation.
+ @param appId the Application ID
+ @param appOwner the Application Owner
+ @return the remote application directory
+ @throws IOException if can not find the remote application directory]]>
+      </doc>
+    </method>
+    <method name="getOlderRemoteAppLogDir" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="appOwner" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Get the older remote application directory for log aggregation.
+ @param appId the Application ID
+ @param appOwner the Application Owner
+ @return the older remote application directory
+ @throws IOException if can not find the remote application directory]]>
+      </doc>
+    </method>
+    <method name="cleanOldLogs"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="remoteNodeLogFileForApp" type="org.apache.hadoop.fs.Path"/>
+      <param name="nodeId" type="org.apache.hadoop.yarn.api.records.NodeId"/>
+      <param name="userUgi" type="org.apache.hadoop.security.UserGroupInformation"/>
+    </method>
+    <method name="aggregatedLogSuffix" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="fileName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Create the aggregated log suffix. The LogAggregationFileController
+ should call this to get the suffix and append the suffix to the end
+ of each log. This would keep the aggregated log format consistent.
+
+ @param fileName the File Name
+ @return the aggregated log suffix String]]>
+      </doc>
+    </method>
+    <method name="isFsSupportsChmod" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="belongsToAppAttempt" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="appAttemptId" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+      <param name="containerIdStr" type="java.lang.String"/>
+    </method>
+    <field name="TLDIR_PERMISSIONS" type="org.apache.hadoop.fs.permission.FsPermission"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Permissions for the top level directory under which app directories will be
+ created.]]>
+      </doc>
+    </field>
+    <field name="APP_DIR_PERMISSIONS" type="org.apache.hadoop.fs.permission.FsPermission"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Permissions for the Application directory.]]>
+      </doc>
+    </field>
+    <field name="APP_LOG_FILE_UMASK" type="org.apache.hadoop.fs.permission.FsPermission"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="protected"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Umask for the log file.]]>
+      </doc>
+    </field>
+    <field name="conf" type="org.apache.hadoop.conf.Configuration"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="remoteRootLogDir" type="org.apache.hadoop.fs.Path"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="remoteRootLogDirSuffix" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="retentionSize" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="fileControllerName" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="fsSupportsChmod" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Base class to implement Log Aggregation File Controller.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController -->
+</package>
+<package name="org.apache.hadoop.yarn.logaggregation.filecontroller.ifile">
+</package>
+<package name="org.apache.hadoop.yarn.logaggregation.filecontroller.tfile">
+</package>
+<package name="org.apache.hadoop.yarn.nodelabels">
+</package>
+<package name="org.apache.hadoop.yarn.nodelabels.event">
+</package>
+<package name="org.apache.hadoop.yarn.security">
+  <!-- start class org.apache.hadoop.yarn.security.AccessRequest -->
+  <class name="AccessRequest" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AccessRequest" type="org.apache.hadoop.yarn.security.PrivilegedEntity, org.apache.hadoop.security.UserGroupInformation, org.apache.hadoop.yarn.security.AccessType, java.lang.String, java.lang.String, java.lang.String, java.util.List"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAccessType" return="org.apache.hadoop.yarn.security.AccessType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getEntity" return="org.apache.hadoop.yarn.security.PrivilegedEntity"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getForwardedAddresses" return="java.util.List"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRemoteAddress" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[This request object contains all the context information to determine whether
+ a user has permission to access the target entity.
+ user       : the user who's currently accessing
+ accessType : the access type against the entity.
+ entity     : the target object user is accessing.
+ appId      : the associated app Id for current access. This could be null
+              if no app is associated.
+ appName    : the associated app name for current access. This could be null if
+              no app is associated.
+ remoteAddress : The caller's remote ip address.
+ forwardedAddresses : In case this is an http request, this contains the
+                    originating IP address of a client connecting to a web
+                    server through an HTTP proxy or load balancer. This
+                    parameter is null, if it's a RPC request.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.AccessRequest -->
+  <!-- start class org.apache.hadoop.yarn.security.AMRMTokenIdentifier -->
+  <class name="AMRMTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AMRMTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AMRMTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.AMRMTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[AMRMTokenIdentifier is the TokenIdentifier to be used by
+ ApplicationMasters to authenticate to the ResourceManager.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.AMRMTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.AMRMTokenSelector -->
+  <class name="AMRMTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="AMRMTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.AMRMTokenSelector -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo -->
+  <class name="ContainerManagerSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerManagerSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerTokenIdentifier -->
+  <class name="ContainerTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      static="false" final="false" visibility="public"
+      deprecated="Use one of the other constructors instead.">
+      <doc>
+      <![CDATA[Creates a instance.
+
+ @param appSubmitter appSubmitter
+ @param containerID container ID
+ @param creationTime creation time
+ @param expiryTimeStamp expiry timestamp
+ @param hostName hostname
+ @param logAggregationContext log aggregation context
+ @param masterKeyId master key ID
+ @param priority priority
+ @param r resource needed by the container
+ @param rmIdentifier ResourceManager identifier
+ @deprecated Use one of the other constructors instead.]]>
+      </doc>
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext, java.lang.String, org.apache.hadoop.yarn.server.api.ContainerType"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, int, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext, java.lang.String, org.apache.hadoop.yarn.server.api.ContainerType, org.apache.hadoop.yarn.api.records.ExecutionType"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, int, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext, java.lang.String, org.apache.hadoop.yarn.server.api.ContainerType, org.apache.hadoop.yarn.api.records.ExecutionType, long"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Convenience Constructor for existing clients.
+
+ @param containerID containerID
+ @param containerVersion containerVersion
+ @param hostName hostName
+ @param appSubmitter appSubmitter
+ @param r resource
+ @param expiryTimeStamp expiryTimeStamp
+ @param masterKeyId masterKeyId
+ @param rmIdentifier rmIdentifier
+ @param priority priority
+ @param creationTime creationTime
+ @param logAggregationContext logAggregationContext
+ @param nodeLabelExpression nodeLabelExpression
+ @param containerType containerType
+ @param executionType executionType
+ @param allocationRequestId allocationRequestId]]>
+      </doc>
+    </constructor>
+    <constructor name="ContainerTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ContainerId, int, java.lang.String, java.lang.String, org.apache.hadoop.yarn.api.records.Resource, long, int, long, org.apache.hadoop.yarn.api.records.Priority, long, org.apache.hadoop.yarn.api.records.LogAggregationContext, java.lang.String, org.apache.hadoop.yarn.server.api.ContainerType, org.apache.hadoop.yarn.api.records.ExecutionType, long, java.util.Set"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a Container Token Identifier.
+
+ @param containerID containerID
+ @param containerVersion containerVersion
+ @param hostName hostName
+ @param appSubmitter appSubmitter
+ @param r resource
+ @param expiryTimeStamp expiryTimeStamp
+ @param masterKeyId masterKeyId
+ @param rmIdentifier rmIdentifier
+ @param priority priority
+ @param creationTime creationTime
+ @param logAggregationContext logAggregationContext
+ @param nodeLabelExpression nodeLabelExpression
+ @param containerType containerType
+ @param executionType executionType
+ @param allocationRequestId allocationRequestId
+ @param allocationTags Set of allocation Tags.]]>
+      </doc>
+    </constructor>
+    <constructor name="ContainerTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor needed by RPC layer/SecretManager.]]>
+      </doc>
+    </constructor>
+    <method name="getContainerID" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmitter" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNmHostAddress" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getResource" return="org.apache.hadoop.yarn.api.records.Resource"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getExpiryTimeStamp" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getMasterKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="org.apache.hadoop.yarn.api.records.Priority"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCreationTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRMIdentifier" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the RMIdentifier of RM in which containers are allocated.
+ @return RMIdentifier]]>
+      </doc>
+    </method>
+    <method name="getContainerType" return="org.apache.hadoop.yarn.server.api.ContainerType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the ContainerType of container to allocate.
+ @return ContainerType]]>
+      </doc>
+    </method>
+    <method name="getExecutionType" return="org.apache.hadoop.yarn.api.records.ExecutionType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the ExecutionType of container to allocate
+ @return ExecutionType]]>
+      </doc>
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.ContainerTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLogAggregationContext" return="org.apache.hadoop.yarn.api.records.LogAggregationContext"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocationRequestId" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getVersion" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the Container version
+ @return container version]]>
+      </doc>
+    </method>
+    <method name="getNodeLabelExpression" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the node-label-expression in the original ResourceRequest]]>
+      </doc>
+    </method>
+    <method name="getAllcationTags" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[TokenIdentifier for a container. Encodes {@link ContainerId},
+ {@link Resource} needed by the container and the target NMs host-address.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.ContainerTokenSelector -->
+  <class name="ContainerTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="ContainerTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.ContainerTokenSelector -->
+  <!-- start class org.apache.hadoop.yarn.security.NMTokenIdentifier -->
+  <class name="NMTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NMTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, org.apache.hadoop.yarn.api.records.NodeId, java.lang.String, int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="NMTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Default constructor needed by RPC/Secret manager]]>
+      </doc>
+    </constructor>
+    <method name="getApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationSubmitter" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getKeyId" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.NMTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.NMTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.SchedulerSecurityInfo -->
+  <class name="SchedulerSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SchedulerSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.SchedulerSecurityInfo -->
+</package>
+<package name="org.apache.hadoop.yarn.security.admin">
+  <!-- start class org.apache.hadoop.yarn.security.admin.AdminSecurityInfo -->
+  <class name="AdminSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AdminSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.admin.AdminSecurityInfo -->
+</package>
+<package name="org.apache.hadoop.yarn.security.client">
+  <!-- start class org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager -->
+  <class name="BaseClientToAMTokenSecretManager" extends="org.apache.hadoop.security.token.SecretManager"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BaseClientToAMTokenSecretManager"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[A base {@link SecretManager} for AMs to extend and validate Client-RM tokens
+ issued to clients by the RM using the underlying master-key shared by RM to
+ the AMs on their launch. All the methods are called by either Hadoop RPC or
+ YARN, so this class is strictly for the purpose of inherit/extend and
+ register with Hadoop RPC.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager -->
+  <!-- start class org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo -->
+  <class name="ClientRMSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClientRMSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo -->
+  <!-- start class org.apache.hadoop.yarn.security.client.ClientTimelineSecurityInfo -->
+  <class name="ClientTimelineSecurityInfo" extends="org.apache.hadoop.security.SecurityInfo"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClientTimelineSecurityInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getKerberosInfo" return="org.apache.hadoop.security.KerberosInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="getTokenInfo" return="org.apache.hadoop.security.token.TokenInfo"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.ClientTimelineSecurityInfo -->
+  <!-- start class org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier -->
+  <class name="ClientToAMTokenIdentifier" extends="org.apache.hadoop.security.token.TokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClientToAMTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ClientToAMTokenIdentifier" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getApplicationAttemptID" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getClientName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProto" return="org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.ClientToAMTokenIdentifierProto"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="write"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="out" type="java.io.DataOutput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="readFields"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.DataInput"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="org.apache.hadoop.security.UserGroupInformation"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hashCode" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="equals" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="other" type="java.lang.Object"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager -->
+  <class name="ClientToAMTokenSecretManager" extends="org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ClientToAMTokenSecretManager" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId, byte[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="retrievePassword" return="byte[]"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="identifier" type="org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier"/>
+      <exception name="SecretManager.InvalidToken" type="org.apache.hadoop.security.token.SecretManager.InvalidToken"/>
+    </method>
+    <method name="getMasterKey" return="javax.crypto.SecretKey"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptID" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"/>
+    </method>
+    <method name="setMasterKey"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="byte[]"/>
+    </method>
+    <doc>
+    <![CDATA[A simple {@link SecretManager} for AMs to validate Client-RM tokens issued to
+ clients by the RM using the underlying master-key shared by RM to the AMs on
+ their launch. All the methods are called by either Hadoop RPC or YARN, so
+ this class is strictly for the purpose of inherit/extend and register with
+ Hadoop RPC.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager -->
+  <!-- start class org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier -->
+  <class name="RMDelegationTokenIdentifier" extends="org.apache.hadoop.yarn.security.client.YARNDelegationTokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="RMDelegationTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="RMDelegationTokenIdentifier" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new delegation token identifier
+ @param owner the effective username of the token owner
+ @param renewer the username of the renewer
+ @param realUser the real username of the token owner]]>
+      </doc>
+    </constructor>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Delegation Token Identifier that identifies the delegation tokens from the 
+ Resource Manager.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.client.RMDelegationTokenSelector -->
+  <class name="RMDelegationTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="RMDelegationTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.RMDelegationTokenSelector -->
+  <!-- start class org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier -->
+  <class name="TimelineDelegationTokenIdentifier" extends="org.apache.hadoop.yarn.security.client.YARNDelegationTokenIdentifier"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineDelegationTokenIdentifier"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="TimelineDelegationTokenIdentifier" type="org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create a new timeline delegation token identifier
+
+ @param owner the effective username of the token owner
+ @param renewer the username of the renewer
+ @param realUser the real username of the token owner]]>
+      </doc>
+    </constructor>
+    <method name="getKind" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="KIND_NAME" type="org.apache.hadoop.io.Text"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier -->
+  <!-- start class org.apache.hadoop.yarn.security.client.TimelineDelegationTokenSelector -->
+  <class name="TimelineDelegationTokenSelector" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.security.token.TokenSelector"/>
+    <constructor name="TimelineDelegationTokenSelector"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="selectToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <param name="tokens" type="java.util.Collection"/>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.security.client.TimelineDelegationTokenSelector -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.client">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.service">
+</package>
+<package name="org.apache.hadoop.yarn.sharedcache">
+  <!-- start interface org.apache.hadoop.yarn.sharedcache.SharedCacheChecksum -->
+  <interface name="SharedCacheChecksum"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="computeChecksum" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="in" type="java.io.InputStream"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Calculate the checksum of the passed input stream.
+
+ @param in <code>InputStream</code> to be checksumed
+ @return the message digest of the input stream
+ @throws IOException]]>
+      </doc>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.sharedcache.SharedCacheChecksum -->
+  <!-- start class org.apache.hadoop.yarn.sharedcache.SharedCacheChecksumFactory -->
+  <class name="SharedCacheChecksumFactory" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="SharedCacheChecksumFactory"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getChecksum" return="org.apache.hadoop.yarn.sharedcache.SharedCacheChecksum"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Get a new <code>SharedCacheChecksum</code> object based on the configurable
+ algorithm implementation
+ (see <code>yarn.sharedcache.checksum.algo.impl</code>)
+
+ @return <code>SharedCacheChecksum</code> object]]>
+      </doc>
+    </method>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.sharedcache.SharedCacheChecksumFactory -->
+</package>
+<package name="org.apache.hadoop.yarn.state">
+  <!-- start class org.apache.hadoop.yarn.state.InvalidStateTransitionException -->
+  <class name="InvalidStateTransitionException" extends="org.apache.hadoop.yarn.state.InvalidStateTransitonException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="InvalidStateTransitionException" type="java.lang.Enum, java.lang.Enum"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <doc>
+    <![CDATA[The exception that happens when you call invalid state transition.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.state.InvalidStateTransitionException -->
+  <!-- start class org.apache.hadoop.yarn.state.InvalidStateTransitonException -->
+  <class name="InvalidStateTransitonException" extends="org.apache.hadoop.yarn.exceptions.YarnRuntimeException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="Use {@link InvalidStateTransitionException} instead.">
+    <constructor name="InvalidStateTransitonException" type="java.lang.Enum, java.lang.Enum"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getCurrentState" return="java.lang.Enum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getEvent" return="java.lang.Enum"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[@deprecated Use {@link InvalidStateTransitionException} instead.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.state.InvalidStateTransitonException -->
+  <!-- start interface org.apache.hadoop.yarn.state.MultipleArcTransition -->
+  <interface name="MultipleArcTransition"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="transition" return="STATE"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="operand" type="OPERAND"/>
+      <param name="event" type="EVENT"/>
+      <doc>
+      <![CDATA[Transition hook.
+ @return the postState. Post state must be one of the 
+                      valid post states registered in StateMachine.
+ @param operand the entity attached to the FSM, whose internal 
+                state may change.
+ @param event causal event]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Hook for Transition. 
+ Post state is decided by Transition hook. Post state must be one of the 
+ valid post states registered in StateMachine.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.state.MultipleArcTransition -->
+  <!-- start interface org.apache.hadoop.yarn.state.SingleArcTransition -->
+  <interface name="SingleArcTransition"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="transition"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="operand" type="OPERAND"/>
+      <param name="event" type="EVENT"/>
+      <doc>
+      <![CDATA[Transition hook.
+ 
+ @param operand the entity attached to the FSM, whose internal 
+                state may change.
+ @param event causal event]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[Hook for Transition. This lead to state machine to move to 
+ the post state as registered in the state machine.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.state.SingleArcTransition -->
+  <!-- start interface org.apache.hadoop.yarn.state.StateMachine -->
+  <interface name="StateMachine"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getCurrentState" return="STATE"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="doTransition" return="STATE"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="eventType" type="EVENTTYPE"/>
+      <param name="event" type="EVENT"/>
+      <exception name="InvalidStateTransitionException" type="org.apache.hadoop.yarn.state.InvalidStateTransitionException"/>
+    </method>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.state.StateMachine -->
+  <!-- start class org.apache.hadoop.yarn.state.StateMachineFactory -->
+  <class name="StateMachineFactory" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="StateMachineFactory" type="STATE"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor
+
+ This is the only constructor in the API.]]>
+      </doc>
+    </constructor>
+    <method name="addTransition" return="org.apache.hadoop.yarn.state.StateMachineFactory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="preState" type="STATE"/>
+      <param name="postState" type="STATE"/>
+      <param name="eventType" type="EVENTTYPE"/>
+      <doc>
+      <![CDATA[@return a NEW StateMachineFactory just like {@code this} with the current
+          transition added as a new legal transition.  This overload
+          has no hook object.
+
+         Note that the returned StateMachineFactory is a distinct
+         object.
+
+         This method is part of the API.
+
+ @param preState pre-transition state
+ @param postState post-transition state
+ @param eventType stimulus for the transition]]>
+      </doc>
+    </method>
+    <method name="addTransition" return="org.apache.hadoop.yarn.state.StateMachineFactory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="preState" type="STATE"/>
+      <param name="postState" type="STATE"/>
+      <param name="eventTypes" type="java.util.Set"/>
+      <doc>
+      <![CDATA[@return a NEW StateMachineFactory just like {@code this} with the current
+          transition added as a new legal transition.  This overload
+          has no hook object.
+
+
+         Note that the returned StateMachineFactory is a distinct
+         object.
+
+         This method is part of the API.
+
+ @param preState pre-transition state
+ @param postState post-transition state
+ @param eventTypes List of stimuli for the transitions]]>
+      </doc>
+    </method>
+    <method name="addTransition" return="org.apache.hadoop.yarn.state.StateMachineFactory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="preState" type="STATE"/>
+      <param name="postState" type="STATE"/>
+      <param name="eventTypes" type="java.util.Set"/>
+      <param name="hook" type="org.apache.hadoop.yarn.state.SingleArcTransition"/>
+      <doc>
+      <![CDATA[@return a NEW StateMachineFactory just like {@code this} with the current
+          transition added as a new legal transition
+
+         Note that the returned StateMachineFactory is a distinct
+         object.
+
+         This method is part of the API.
+
+ @param preState pre-transition state
+ @param postState post-transition state
+ @param eventTypes List of stimuli for the transitions
+ @param hook transition hook]]>
+      </doc>
+    </method>
+    <method name="addTransition" return="org.apache.hadoop.yarn.state.StateMachineFactory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="preState" type="STATE"/>
+      <param name="postState" type="STATE"/>
+      <param name="eventType" type="EVENTTYPE"/>
+      <param name="hook" type="org.apache.hadoop.yarn.state.SingleArcTransition"/>
+      <doc>
+      <![CDATA[@return a NEW StateMachineFactory just like {@code this} with the current
+          transition added as a new legal transition
+
+         Note that the returned StateMachineFactory is a distinct object.
+
+         This method is part of the API.
+
+ @param preState pre-transition state
+ @param postState post-transition state
+ @param eventType stimulus for the transition
+ @param hook transition hook]]>
+      </doc>
+    </method>
+    <method name="addTransition" return="org.apache.hadoop.yarn.state.StateMachineFactory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="preState" type="STATE"/>
+      <param name="postStates" type="java.util.Set"/>
+      <param name="eventType" type="EVENTTYPE"/>
+      <param name="hook" type="org.apache.hadoop.yarn.state.MultipleArcTransition"/>
+      <doc>
+      <![CDATA[@return a NEW StateMachineFactory just like {@code this} with the current
+          transition added as a new legal transition
+
+         Note that the returned StateMachineFactory is a distinct object.
+
+         This method is part of the API.
+
+ @param preState pre-transition state
+ @param postStates valid post-transition states
+ @param eventType stimulus for the transition
+ @param hook transition hook]]>
+      </doc>
+    </method>
+    <method name="installTopology" return="org.apache.hadoop.yarn.state.StateMachineFactory"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[@return a StateMachineFactory just like {@code this}, except that if
+         you won't need any synchronization to build a state machine
+
+         Note that the returned StateMachineFactory is a distinct object.
+
+         This method is part of the API.
+
+         The only way you could distinguish the returned
+         StateMachineFactory from {@code this} would be by
+         measuring the performance of the derived 
+         {@code StateMachine} you can get from it.
+
+ Calling this is optional.  It doesn't change the semantics of the factory,
+   if you call it then when you use the factory there is no synchronization.]]>
+      </doc>
+    </method>
+    <method name="make" return="org.apache.hadoop.yarn.state.StateMachine"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="operand" type="OPERAND"/>
+      <param name="initialState" type="STATE"/>
+      <param name="listener" type="org.apache.hadoop.yarn.state.StateTransitionListener"/>
+      <doc>
+      <![CDATA[A StateMachine that accepts a transition listener.
+ @param operand the object upon which the returned
+                {@link StateMachine} will operate.
+ @param initialState the state in which the returned
+                {@link StateMachine} will start.
+ @param listener An implementation of a {@link StateTransitionListener}.
+ @return A (@link StateMachine}.]]>
+      </doc>
+    </method>
+    <method name="make" return="org.apache.hadoop.yarn.state.StateMachine"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="operand" type="OPERAND"/>
+      <param name="initialState" type="STATE"/>
+    </method>
+    <method name="make" return="org.apache.hadoop.yarn.state.StateMachine"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="operand" type="OPERAND"/>
+    </method>
+    <method name="generateStateGraph" return="org.apache.hadoop.yarn.state.Graph"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="name" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Generate a graph represents the state graph of this StateMachine
+ @param name graph name
+ @return Graph object generated]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[State machine topology.
+ This object is semantically immutable.  If you have a
+ StateMachineFactory there's no operation in the API that changes
+ its semantic properties.
+
+ @param <OPERAND> The object type on which this state machine operates.
+ @param <STATE> The state of the entity.
+ @param <EVENTTYPE> The external eventType to be handled.
+ @param <EVENT> The event object.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.state.StateMachineFactory -->
+  <!-- start interface org.apache.hadoop.yarn.state.StateTransitionListener -->
+  <interface name="StateTransitionListener"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="preTransition"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="op" type="OPERAND"/>
+      <param name="beforeState" type="STATE"/>
+      <param name="eventToBeProcessed" type="EVENT"/>
+      <doc>
+      <![CDATA[Pre Transition Hook. This will be called before transition.
+ @param op Operand.
+ @param beforeState State before transition.
+ @param eventToBeProcessed Incoming Event.]]>
+      </doc>
+    </method>
+    <method name="postTransition"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="op" type="OPERAND"/>
+      <param name="beforeState" type="STATE"/>
+      <param name="afterState" type="STATE"/>
+      <param name="processedEvent" type="EVENT"/>
+      <doc>
+      <![CDATA[Post Transition Hook. This will be called after the transition.
+ @param op Operand.
+ @param beforeState State before transition.
+ @param afterState State after transition.
+ @param processedEvent Processed Event.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A State Transition Listener.
+ It exposes a pre and post transition hook called before and
+ after the transition.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.state.StateTransitionListener -->
+</package>
+<package name="org.apache.hadoop.yarn.util">
+  <!-- start class org.apache.hadoop.yarn.util.AbstractLivelinessMonitor -->
+  <class name="AbstractLivelinessMonitor" extends="org.apache.hadoop.service.AbstractService"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AbstractLivelinessMonitor" type="java.lang.String, org.apache.hadoop.yarn.util.Clock"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AbstractLivelinessMonitor" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="serviceStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="serviceStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="expire"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="ob" type="O"/>
+    </method>
+    <method name="setExpireInterval"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="expireInterval" type="int"/>
+    </method>
+    <method name="getExpireInterval" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="o" type="O"/>
+    </method>
+    <method name="setMonitorInterval"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="monitorInterval" type="long"/>
+    </method>
+    <method name="receivedPing"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ob" type="O"/>
+    </method>
+    <method name="register"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ob" type="O"/>
+    </method>
+    <method name="register"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ob" type="O"/>
+      <param name="expireTime" type="long"/>
+    </method>
+    <method name="unregister"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="ob" type="O"/>
+    </method>
+    <method name="resetTimer"
+      abstract="false" native="false" synchronized="true"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="setResetTimeOnStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="resetTimeOnStart" type="boolean"/>
+    </method>
+    <field name="DEFAULT_EXPIRE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A simple liveliness monitor with which clients can register, trust the
+ component to monitor liveliness, get a call-back on expiry and then finally
+ unregister.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.util.AbstractLivelinessMonitor -->
+  <!-- start class org.apache.hadoop.yarn.util.ApplicationClassLoader -->
+  <class name="ApplicationClassLoader" extends="org.apache.hadoop.util.ApplicationClassLoader"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ApplicationClassLoader" type="java.net.URL[], java.lang.ClassLoader, java.util.List"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ApplicationClassLoader" type="java.lang.String, java.lang.ClassLoader, java.util.List"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="MalformedURLException" type="java.net.MalformedURLException"/>
+    </constructor>
+    <doc>
+    <![CDATA[This type has been deprecated in favor of
+ {@link org.apache.hadoop.util.ApplicationClassLoader}. All new uses of
+ ApplicationClassLoader should use that type instead.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.util.ApplicationClassLoader -->
+  <!-- start class org.apache.hadoop.yarn.util.BoundedAppender -->
+  <class name="BoundedAppender" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="BoundedAppender" type="int"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="append" return="org.apache.hadoop.yarn.util.BoundedAppender"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="csq" type="java.lang.CharSequence"/>
+      <doc>
+      <![CDATA[Append a {@link CharSequence} considering {@link #limit}, truncating
+ from the head of {@code csq} or {@link #messages} when necessary.
+
+ @param csq the {@link CharSequence} to append
+ @return this]]>
+      </doc>
+    </method>
+    <method name="length" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get current length of messages considering truncates
+ without header and ellipses.
+
+ @return current length]]>
+      </doc>
+    </method>
+    <method name="getLimit" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a string representation of the actual contents, displaying also a
+ header and ellipses when there was a truncate.
+
+ @return String representation of the {@link #messages}]]>
+      </doc>
+    </method>
+    <field name="TRUNCATED_MESSAGES_TEMPLATE" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A {@link CharSequence} appender that considers its {@link #limit} as upper
+ bound.
+ <p>
+ When {@link #limit} would be reached on append, past messages will be
+ truncated from head, and a header telling the user about truncation will be
+ prepended, with ellipses in between header and messages.
+ <p>
+ Note that header and ellipses are not counted against {@link #limit}.
+ <p>
+ An example:
+
+ <pre>
+ {@code
+   // At the beginning it's an empty string
+   final Appendable shortAppender = new BoundedAppender(80);
+   // The whole message fits into limit
+   shortAppender.append(
+       "message1 this is a very long message but fitting into limit\n");
+   // The first message is truncated, the second not
+   shortAppender.append("message2 this is shorter than the previous one\n");
+   // The first message is deleted, the second truncated, the third
+   // preserved
+   shortAppender.append("message3 this is even shorter message, maybe.\n");
+   // The first two are deleted, the third one truncated, the last preserved
+   shortAppender.append("message4 the shortest one, yet the greatest :)");
+   // Current contents are like this:
+   // Diagnostic messages truncated, showing last 80 chars out of 199:
+   // ...s is even shorter message, maybe.
+   // message4 the shortest one, yet the greatest :)
+ }
+ </pre>
+ <p>
+ Note that <tt>null</tt> values are {@link #append(CharSequence) append}ed
+ just like in {@link StringBuilder#append(CharSequence) original
+ implementation}.
+ <p>
+ Note that this class is not thread safe.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.util.BoundedAppender -->
+  <!-- start interface org.apache.hadoop.yarn.util.Clock -->
+  <interface name="Clock"    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <method name="getTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[A simple clock interface that gives you time.]]>
+    </doc>
+  </interface>
+  <!-- end interface org.apache.hadoop.yarn.util.Clock -->
+  <!-- start class org.apache.hadoop.yarn.util.ConverterUtils -->
+  <class name="ConverterUtils" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ConverterUtils"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getPathFromYarnURL" return="org.apache.hadoop.fs.Path"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="url" type="org.apache.hadoop.yarn.api.records.URL"/>
+      <exception name="URISyntaxException" type="java.net.URISyntaxException"/>
+      <doc>
+      <![CDATA[return a hadoop path from a given url
+ This method is deprecated, use {@link URL#toPath()} instead.
+ 
+ @param url
+          url to convert
+ @return path from {@link URL}
+ @throws URISyntaxException]]>
+      </doc>
+    </method>
+    <method name="getYarnUrlFromPath" return="org.apache.hadoop.yarn.api.records.URL"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+    </method>
+    <method name="getYarnUrlFromURI" return="org.apache.hadoop.yarn.api.records.URL"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+    </method>
+    <method name="toApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="recordFactory" type="org.apache.hadoop.yarn.factories.RecordFactory"/>
+      <param name="applicationIdStr" type="java.lang.String"/>
+    </method>
+    <method name="toString" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="cId" type="org.apache.hadoop.yarn.api.records.ContainerId"/>
+    </method>
+    <method name="toNodeId" return="org.apache.hadoop.yarn.api.records.NodeId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="nodeIdStr" type="java.lang.String"/>
+    </method>
+    <method name="toContainerId" return="org.apache.hadoop.yarn.api.records.ContainerId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerIdStr" type="java.lang.String"/>
+    </method>
+    <method name="toApplicationAttemptId" return="org.apache.hadoop.yarn.api.records.ApplicationAttemptId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="applicationAttemptIdStr" type="java.lang.String"/>
+    </method>
+    <method name="toApplicationId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appIdStr" type="java.lang.String"/>
+    </method>
+    <method name="convertFromYarn" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protoToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <param name="serviceAddr" type="java.net.InetSocketAddress"/>
+      <doc>
+      <![CDATA[Convert a protobuf token into a rpc token and set its service. Supposed
+ to be used for tokens other than RMDelegationToken. For
+ RMDelegationToken, use
+ {@link #convertFromYarn(org.apache.hadoop.yarn.api.records.Token,
+ org.apache.hadoop.io.Text)} instead.
+
+ @param protoToken the yarn token
+ @param serviceAddr the connect address for the service
+ @return rpc token]]>
+      </doc>
+    </method>
+    <method name="convertFromYarn" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="protoToken" type="org.apache.hadoop.yarn.api.records.Token"/>
+      <param name="service" type="org.apache.hadoop.io.Text"/>
+      <doc>
+      <![CDATA[Convert a protobuf token into a rpc token and set its service.
+
+ @param protoToken the yarn token
+ @param service the service for the token]]>
+      </doc>
+    </method>
+    <field name="APPLICATION_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="CONTAINER_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="APPLICATION_ATTEMPT_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[This class contains a set of utilities which help converting data structures
+ from/to 'serializableFormat' to/from hadoop/nativejava data structures.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.util.ConverterUtils -->
+  <!-- start class org.apache.hadoop.yarn.util.MonotonicClock -->
+  <class name="MonotonicClock" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.util.Clock"/>
+    <constructor name="MonotonicClock"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get current time from some arbitrary time base in the past, counting in
+ milliseconds, and not affected by settimeofday or similar system clock
+ changes.
+ @return a monotonic clock that counts in milliseconds.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A monotonic clock from some arbitrary time base in the past, counting in
+ milliseconds, and not affected by settimeofday or similar system clock
+ changes.
+ This is appropriate to use when computing how much longer to wait for an
+ interval to expire.
+ This function can return a negative value and it must be handled correctly
+ by callers. See the documentation of System#nanoTime for caveats.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.util.MonotonicClock -->
+  <!-- start class org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree -->
+  <class name="ResourceCalculatorProcessTree" extends="org.apache.hadoop.conf.Configured"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ResourceCalculatorProcessTree" type="java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create process-tree instance with specified root process.
+
+ Subclass must override this.
+ @param root process-tree root-process]]>
+      </doc>
+    </constructor>
+    <method name="initialize"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Initialize the object.
+ @throws YarnException Throws an exception on error.]]>
+      </doc>
+    </method>
+    <method name="updateProcessTree"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Update the process-tree with latest state.
+
+ Each call to this function should increment the age of the running
+ processes that already exist in the process tree. Age is used other API's
+ of the interface.]]>
+      </doc>
+    </method>
+    <method name="getProcessTreeDump" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get a dump of the process-tree.
+
+ @return a string concatenating the dump of information of all the processes
+         in the process-tree]]>
+      </doc>
+    </method>
+    <method name="getVirtualMemorySize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the virtual memory used by all the processes in the
+ process-tree.
+
+ @return virtual memory used by the process-tree in bytes,
+ {@link #UNAVAILABLE} if it cannot be calculated.]]>
+      </doc>
+    </method>
+    <method name="getRssMemorySize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the resident set size (rss) memory used by all the processes
+ in the process-tree.
+
+ @return rss memory used by the process-tree in bytes,
+ {@link #UNAVAILABLE} if it cannot be calculated.]]>
+      </doc>
+    </method>
+    <method name="getVirtualMemorySize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="olderThanAge" type="int"/>
+      <doc>
+      <![CDATA[Get the virtual memory used by all the processes in the
+ process-tree that are older than the passed in age.
+
+ @param olderThanAge processes above this age are included in the
+                     memory addition
+ @return virtual memory used by the process-tree in bytes for
+ processes older than the specified age, {@link #UNAVAILABLE} if it
+ cannot be calculated.]]>
+      </doc>
+    </method>
+    <method name="getRssMemorySize" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="olderThanAge" type="int"/>
+      <doc>
+      <![CDATA[Get the resident set size (rss) memory used by all the processes
+ in the process-tree that are older than the passed in age.
+
+ @param olderThanAge processes above this age are included in the
+                     memory addition
+ @return rss memory used by the process-tree in bytes for
+ processes older than specified age, {@link #UNAVAILABLE} if it cannot be
+ calculated.]]>
+      </doc>
+    </method>
+    <method name="getCumulativeCpuTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the CPU time in millisecond used by all the processes in the
+ process-tree since the process-tree was created
+
+ @return cumulative CPU time in millisecond since the process-tree
+ created, {@link #UNAVAILABLE} if it cannot be calculated.]]>
+      </doc>
+    </method>
+    <method name="getCpuUsagePercent" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the CPU usage by all the processes in the process-tree based on
+ average between samples as a ratio of overall CPU cycles similar to top.
+ Thus, if 2 out of 4 cores are used this should return 200.0.
+ Note: UNAVAILABLE will be returned in case when CPU usage is not
+ available. It is NOT advised to return any other error code.
+
+ @return percentage CPU usage since the process-tree was created,
+ {@link #UNAVAILABLE} if CPU usage cannot be calculated or not available.]]>
+      </doc>
+    </method>
+    <method name="checkPidPgrpidForMatch" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Verify that the tree process id is same as its process group id.
+ @return true if the process id matches else return false.]]>
+      </doc>
+    </method>
+    <method name="getResourceCalculatorProcessTree" return="org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="pid" type="java.lang.String"/>
+      <param name="clazz" type="java.lang.Class"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Create the ResourceCalculatorProcessTree rooted to specified process 
+ from the class name and configure it. If class name is null, this method
+ will try and return a process tree plugin available for this system.
+
+ @param pid process pid of the root of the process tree
+ @param clazz class-name
+ @param conf configure the plugin with this.
+
+ @return ResourceCalculatorProcessTree or null if ResourceCalculatorPluginTree
+         is not available for this system.]]>
+      </doc>
+    </method>
+    <field name="UNAVAILABLE" type="int"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[Interface class to obtain process resource usage
+ NOTE: This class should not be used by external users, but only by external
+ developers to extend and include their own process-tree implementation, 
+ especially for platforms other than Linux and Windows.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.util.ResourceCalculatorProcessTree -->
+  <!-- start class org.apache.hadoop.yarn.util.SystemClock -->
+  <class name="SystemClock" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="true" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.util.Clock"/>
+    <constructor name="SystemClock"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getInstance" return="org.apache.hadoop.yarn.util.SystemClock"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Implementation of {@link Clock} that gives the current time from the system
+ clock in milliseconds.
+ 
+ NOTE: Do not use this to calculate a duration of expire or interval to sleep,
+ because it will be broken by settimeofday. Please use {@link MonotonicClock}
+ instead.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.util.SystemClock -->
+  <!-- start class org.apache.hadoop.yarn.util.UTCClock -->
+  <class name="UTCClock" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="org.apache.hadoop.yarn.util.Clock"/>
+    <constructor name="UTCClock"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Implementation of {@link Clock} that gives the current UTC time in
+ milliseconds.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.util.UTCClock -->
+</package>
+<package name="org.apache.hadoop.yarn.util.resource">
+</package>
+<package name="org.apache.hadoop.yarn.util.timeline">
+  <!-- start class org.apache.hadoop.yarn.util.timeline.TimelineUtils -->
+  <class name="TimelineUtils" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="TimelineUtils"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="dumpTimelineRecordtoJSON" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <exception name="JsonGenerationException" type="com.fasterxml.jackson.core.JsonGenerationException"/>
+      <exception name="JsonMappingException" type="com.fasterxml.jackson.databind.JsonMappingException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Serialize a POJO object into a JSON string not in a pretty format
+ 
+ @param o
+          an object to serialize
+ @return a JSON string
+ @throws IOException
+ @throws JsonMappingException
+ @throws JsonGenerationException]]>
+      </doc>
+    </method>
+    <method name="dumpTimelineRecordtoJSON" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="o" type="java.lang.Object"/>
+      <param name="pretty" type="boolean"/>
+      <exception name="JsonGenerationException" type="com.fasterxml.jackson.core.JsonGenerationException"/>
+      <exception name="JsonMappingException" type="com.fasterxml.jackson.databind.JsonMappingException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Serialize a POJO object into a JSON string
+ 
+ @param o
+          an object to serialize
+ @param pretty
+          whether in a pretty format or not
+ @return a JSON string
+ @throws IOException
+ @throws JsonMappingException
+ @throws JsonGenerationException]]>
+      </doc>
+    </method>
+    <method name="timelineServiceEnabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Returns whether the timeline service is enabled via configuration.
+
+ @param conf the configuration
+ @return whether the timeline service is enabled.]]>
+      </doc>
+    </method>
+    <method name="getTimelineServiceVersion" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Returns the timeline service version. It does not check whether the
+ timeline service itself is enabled.
+
+ @param conf the configuration
+ @return the timeline service version as a float.]]>
+      </doc>
+    </method>
+    <method name="timelineServiceV1_5Enabled" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Returns whether the timeline service v.1.5 is enabled by default via
+ configuration.
+
+ @param conf the configuration
+ @return whether the timeline service v.1.5 is enabled. V.1.5 refers to a
+ version equal to 1.5.]]>
+      </doc>
+    </method>
+    <method name="createTimelineAbout" return="org.apache.hadoop.yarn.api.records.timeline.TimelineAbout"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="about" type="java.lang.String"/>
+    </method>
+    <method name="getTimelineTokenServiceAddress" return="java.net.InetSocketAddress"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="buildTimelineTokenService" return="org.apache.hadoop.io.Text"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+    </method>
+    <method name="generateDefaultFlowName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appName" type="java.lang.String"/>
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+    </method>
+    <method name="generateFlowNameTag" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="flowName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Generate flow name tag.
+
+ @param flowName flow name that identifies a distinct flow application which
+                 can be run repeatedly over time
+ @return flow name tag.]]>
+      </doc>
+    </method>
+    <method name="shortenFlowName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="flowName" type="java.lang.String"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <doc>
+      <![CDATA[Shortens the flow name for the configured size by removing UUID if present.
+
+ @param flowName which has to be shortened
+ @param conf to resize the flow name
+ @return shortened flowName]]>
+      </doc>
+    </method>
+    <method name="generateFlowVersionTag" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="flowVersion" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Generate flow version tag.
+
+ @param flowVersion flow version that keeps track of the changes made to the
+                    flow
+ @return flow version tag.]]>
+      </doc>
+    </method>
+    <method name="generateFlowRunIdTag" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="flowRunId" type="long"/>
+      <doc>
+      <![CDATA[Generate flow run ID tag.
+
+ @param flowRunId flow run ID that identifies one instance (or specific
+                  execution) of that flow
+ @return flow run id tag.]]>
+      </doc>
+    </method>
+    <field name="FLOW_NAME_TAG_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FLOW_VERSION_TAG_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="FLOW_RUN_ID_TAG_PREFIX" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <field name="DEFAULT_FLOW_VERSION" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[The helper class for the timeline module.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.util.timeline.TimelineUtils -->
+</package>
+<package name="org.apache.hadoop.yarn.webapp.util">
+</package>
+
+</api>
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.3.4.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.3.4.xml
new file mode 100644
index 0000000..1f525eb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.3.4.xml
@@ -0,0 +1,1456 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Fri Jul 29 14:10:26 GMT 2022 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="Apache Hadoop YARN Server Common 3.3.4"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/jdiff.jar -verbose -classpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/classes:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.3.4.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-protobuf_3_7/1.1.1/hadoop-shaded-protobuf_3_7-1.1.1.jar:/maven/com/google/guava/guava/27.0-jre/guava-27.0-jre.jar:/maven/com/google/guava/failureaccess/1.0/failureaccess-1.0.jar:/maven/com/google/guava/listenablefuture/9999.0-empty-to-avoid-conflict-with-guava/listenablefuture-9999.0-empty-to-avoid-conflict-with-guava.jar:/maven/org/checkerframework/checker-qual/2.5.2/checker-qual-2.5.2.jar:/maven/com/google/j2objc/j2objc-annotations/1.1/j2objc-annotations-1.1.jar:/maven/org/codehaus/mojo/animal-sniffer-annotations/1.17/animal-sniffer-annotations-1.17.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/org/apache/httpcomponents/httpclient/4.5.13/httpclient-4.5.13.jar:/maven/org/apache/httpcomponents/httpcore/4.4.13/httpcore-4.4.13.jar:/maven/commons-codec/commons-codec/1.15/commons-codec-1.15.jar:/maven/commons-io/commons-io/2.8.0/commons-io-2.8.0.jar:/maven/commons-net/commons-net/3.6/commons-net-3.6.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/jakarta/activation/jakarta.activation-api/1.2.1/jakarta.activation-api-1.2.1.jar:/maven/org/eclipse/jetty/jetty-server/9.4.43.v20210629/jetty-server-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-http/9.4.43.v202106
29/jetty-http-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-io/9.4.43.v20210629/jetty-io-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-util/9.4.43.v20210629/jetty-util-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-servlet/9.4.43.v20210629/jetty-servlet-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-security/9.4.43.v20210629/jetty-security-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-util-ajax/9.4.43.v20210629/jetty-util-ajax-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-webapp/9.4.43.v20210629/jetty-webapp-9.4.43.v20210629.jar:/maven/org/eclipse/jetty/jetty-xml/9.4.43.v20210629/jetty-xml-9.4.43.v20210629.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/ch/qos/reload4j/reload4j/1.2.22/reload4j-1.2.22.jar:/maven/commons-beanutils/commons-beanutils/1.9.4/commons-beanutils-1.9.4.jar:/maven/org/apache/commons/commons-configuration2/2.1.1/commons-configuration2-2.1.1.jar:/maven/org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar:/maven/org/apache/commons/commons-text/1.4/commons-text-1.4.jar:/maven/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar:/maven/org/apache/avro/avro/1.7.7/avro-1.7.7.jar:/maven/com/thoughtworks/
paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/com/google/re2j/re2j/1.1/re2j-1.1.jar:/maven/com/google/code/gson/gson/2.8.9/gson-2.8.9.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.3.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/9.8.1/nimbus-jose-jwt-9.8.1.jar:/maven/com/github/stephenc/jcip/jcip-annotations/1.0-1/jcip-annotations-1.0-1.jar:/maven/net/minidev/json-smart/2.4.7/json-smart-2.4.7.jar:/maven/net/minidev/accessors-smart/2.4.7/accessors-smart-2.4.7.jar:/maven/org/ow2/asm/asm/5.0.4/asm-5.0.4.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.1/kerb-simplekdc-1.0.1.jar:/maven/org/apache/kerby/kerb-client/1.0.1/kerb-client-1.0.1.jar:/maven/org/apache/kerby/kerby-config/1.0.1/kerby-config-1.0.1.jar:/maven/org/apache/kerby/kerb-common/1.0.1/kerb-common-1.0.1.jar:/maven/org/apache/kerby/kerb-crypto/1.0.1/kerb-crypto-1.0.1.jar:/maven/org/apache/kerby/kerb-util/1.0.1/kerb-util-1.0.1.jar:/maven/org/apache/kerby/token-provider/1.0.1/token-provider-1.0.1.jar:/maven/org/apache/kerby/kerb-admin/1.0.1/kerb-admin-1.0.1.jar:/maven/org/apache/kerby/kerb-server/1.0.1/kerb-server-1.0.1.jar:/maven/org/apache/kerby/kerb-identity/1.0.1/kerb-identity-1.0.1.jar:/maven/org/apache/kerby/kerby-xdr/1.0.1/kerby-xdr-1.0.1.jar:/maven/com/jcraft/jsch/0.1.55/jsch-0.1.55.jar:/maven/org/apache/curator/curator-client/4.2.0/curator-client-4.2.0.jar:/maven/org/apache/curator/curator-recipes/4.2.0/curator-recipes-4.2.0.jar:/maven/com/google/code/findbugs/jsr305/3.0.2/jsr305-3.0.2.jar:/maven/org/apache/commons/commons-compress/1.21/commons-compress-1.21.jar:/maven/org/apache/kerby/kerb-core/1.0.1/kerb-core-1.0.1.jar:/maven/org/apache/kerby/kerby-pkix/1.0.1/kerby-pkix-1.0.1.jar:/maven/org/apache/kerby/kerby-asn1/1.0.1/kerby-asn1-1.0.1.jar:/maven/org/apache/kerby/kerby-util/1.0.1/kerby-util-1.0.1.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.12.7/jackson-databind-2.12.7.jar:/maven/org/codehaus/woodstox/stax2-api/4.2.1/stax2-api-4.2.1.jar:/maven/com/fasterxml/woodsto
x/woodstox-core/5.3.0/woodstox-core-5.3.0.jar:/maven/dnsjava/dnsjava/2.1.7/dnsjava-2.1.7.jar:/maven/org/xerial/snappy/snappy-java/1.1.8.2/snappy-java-1.1.8.2.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/target/hadoop-yarn-api-3.3.4.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.12.7/jackson-annotations-2.12.7.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/target/hadoop-yarn-common-3.3.4.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-3.3.4.jar:/maven/com/squareup/okhttp3/okhttp/4.9.3/okhttp-4.9.3.jar:/maven/com/squareup/okio/okio/2.8.0/okio-2.8.0.jar:/maven/org/jetbrains/kotlin/kotlin-stdlib/1.4.10/kotlin-stdlib-1.4.10.jar:/maven/org/jetbrains/kotlin/kotlin-stdlib-common/1.4.10/kotlin-stdlib-common-1.4.10.jar:/maven/com/sun/jersey/jersey-client/1.19/jersey-client-1.19.jar:/maven/com/google/inject/extensions/guice-servlet/4.0/guice-servlet-4.0.jar:/maven/com/google/inject/guice/4.0/guice-4.0.jar:/maven/javax/inject/javax.inject/1/javax.inject-1.jar:/maven/aopalliance/aopalliance/1.0/aopalliance-1.0.jar:/maven/com/sun/jersey/contribs/jersey-guice/1.19/jersey-guice-1.19.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.12.7/jackson-core-2.12.7.jar:/maven/com/fasterxml/jackson/module/jackson-module-jaxb-annotations/2.12.7/jackson-module-jaxb-annotations-2.12.7.jar:/maven/jakarta/xml/bind/jakarta.xml.bind-api/2.3.2/jakarta.xml.bind-api-2.3.2.jar:/maven/com/fasterxml/jackson/jaxrs/jackson-jaxrs-json-provider/2.12.7/jackson-jaxrs-json-provider-2.12.7.jar:/maven/com/fasterxml/jackson/jaxrs/jackson-jaxrs-base/2.12.7/jackson-jaxrs-base-2.12.7.jar:/build/source/hadoop-common-project/hadoop-registry/target/hadoop-registry-3.3.4.jar:/maven/org/apache/curator/curator-framework/4.2.0/curator-framework-4.2.0.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/org/apache/hadoop/thirdparty/hadoop-shaded-gu
ava/1.1.1/hadoop-shaded-guava-1.1.1.jar:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.3.4.jar:/usr/lib/jvm/java-8-openjdk-amd64/lib/tools.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/org/apache/zookeeper/zookeeper/3.5.6/zookeeper-3.5.6.jar:/maven/org/apache/zookeeper/zookeeper-jute/3.5.6/zookeeper-jute-3.5.6.jar:/maven/org/apache/yetus/audience-annotations/0.5.0/audience-annotations-0.5.0.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/org/apache/geronimo/specs/geronimo-jcache_1.0_spec/1.0-alpha-1/geronimo-jcache_1.0_spec-1.0-alpha-1.jar:/maven/org/ehcache/ehcache/3.3.1/ehcache-3.3.1.jar:/maven/com/zaxxer/HikariCP-java7/2.4.12/HikariCP-java7-2.4.12.jar:/maven/xerces/xercesImpl/2.12.2/xercesImpl-2.12.2.jar:/maven/xml-apis/xml-apis/1.4.01/xml-apis-1.4.01.jar -sourcepath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/hadoop-annotations.jar:/build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/jdiff.jar -apidir /build/source/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/target/site/jdiff/xml -apiname Apache Hadoop YARN Server Common 3.3.4 -->
+<package name="org.apache.hadoop.yarn.server">
+</package>
+<package name="org.apache.hadoop.yarn.server.api">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.client">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.impl.pb.service">
+</package>
+<package name="org.apache.hadoop.yarn.server.api.records">
+  <!-- start class org.apache.hadoop.yarn.server.api.records.NodeHealthStatus -->
+  <class name="NodeHealthStatus" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="NodeHealthStatus"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getIsNodeHealthy" return="boolean"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Is the node healthy?
+ @return <code>true</code> if the node is healthy, else <code>false</code>]]>
+      </doc>
+    </method>
+    <method name="getHealthReport" return="java.lang.String"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>diagnostic health report</em> of the node.
+ @return <em>diagnostic health report</em> of the node]]>
+      </doc>
+    </method>
+    <method name="getLastHealthReportTime" return="long"
+      abstract="true" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the <em>last timestamp</em> at which the health report was received.
+ @return <em>last timestamp</em> at which the health report was received]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[{@code NodeHealthStatus} is a summary of the health status of the node.
+ <p>
+ It includes information such as:
+ <ul>
+   <li>
+     An indicator of whether the node is healthy, as determined by the
+     health-check script.
+   </li>
+   <li>The previous time at which the health status was reported.</li>
+   <li>A diagnostic report on the health status.</li>
+ </ul>
+ 
+ @see NodeReport
+ @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.api.records.NodeHealthStatus -->
+</package>
+<package name="org.apache.hadoop.yarn.server.api.records.impl.pb">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.failover">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.policies">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.policies.amrmproxy">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.policies.dao">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.policies.exceptions">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.policies.manager">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.policies.router">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.resolver">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.store">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.store.exception">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.store.impl">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.store.metrics">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.store.records">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.store.records.impl.pb">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.store.utils">
+</package>
+<package name="org.apache.hadoop.yarn.server.federation.utils">
+</package>
+<package name="org.apache.hadoop.yarn.server.metrics">
+</package>
+<package name="org.apache.hadoop.yarn.server.records">
+</package>
+<package name="org.apache.hadoop.yarn.server.records.impl.pb">
+</package>
+<package name="org.apache.hadoop.yarn.server.scheduler">
+</package>
+<package name="org.apache.hadoop.yarn.server.security.http">
+</package>
+<package name="org.apache.hadoop.yarn.server.sharedcache">
+</package>
+<package name="org.apache.hadoop.yarn.server.uam">
+  <!-- start class org.apache.hadoop.yarn.server.uam.UnmanagedAMPoolManager -->
+  <class name="UnmanagedAMPoolManager" extends="org.apache.hadoop.service.AbstractService"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UnmanagedAMPoolManager" type="java.util.concurrent.ExecutorService"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="serviceStart"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+    </method>
+    <method name="serviceStop"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="Exception" type="java.lang.Exception"/>
+      <doc>
+      <![CDATA[Normally we should finish all applications before stop. If there are still
+ UAMs running, force kill all of them. Do parallel kill because of
+ performance reasons.
+
+ TODO: move waiting for the kill to finish into a separate thread, without
+ blocking the serviceStop.]]>
+      </doc>
+    </method>
+    <method name="createAndRegisterNewUAM" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="registerRequest" type="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="queueName" type="java.lang.String"/>
+      <param name="submitter" type="java.lang.String"/>
+      <param name="appNameSuffix" type="java.lang.String"/>
+      <param name="keepContainersAcrossApplicationAttempts" type="boolean"/>
+      <param name="rmName" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a new UAM and register the application, without specifying uamId and
+ appId. We will ask for an appId from RM and use it as the uamId.
+
+ @param registerRequest RegisterApplicationMasterRequest
+ @param conf configuration for this UAM
+ @param queueName queue of the application
+ @param submitter submitter name of the UAM
+ @param appNameSuffix application name suffix for the UAM
+ @param keepContainersAcrossApplicationAttempts keep container flag for UAM
+          recovery.
+ @param rmName name of the YarnRM
+ @see ApplicationSubmissionContext
+          #setKeepContainersAcrossApplicationAttempts(boolean)
+ @return uamId for the UAM
+ @throws YarnException if registerApplicationMaster fails
+ @throws IOException if registerApplicationMaster fails]]>
+      </doc>
+    </method>
+    <method name="launchUAM" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uamId" type="java.lang.String"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="queueName" type="java.lang.String"/>
+      <param name="submitter" type="java.lang.String"/>
+      <param name="appNameSuffix" type="java.lang.String"/>
+      <param name="keepContainersAcrossApplicationAttempts" type="boolean"/>
+      <param name="rmName" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Launch a new UAM, using the provided uamId and appId.
+
+ @param uamId uam Id
+ @param conf configuration for this UAM
+ @param appId application id for the UAM
+ @param queueName queue of the application
+ @param submitter submitter name of the UAM
+ @param appNameSuffix application name suffix for the UAM
+ @param keepContainersAcrossApplicationAttempts keep container flag for UAM
+          recovery.
+ @param rmName name of the YarnRM
+ @see ApplicationSubmissionContext
+          #setKeepContainersAcrossApplicationAttempts(boolean)
+ @return UAM token
+ @throws YarnException if fails
+ @throws IOException if fails]]>
+      </doc>
+    </method>
+    <method name="reAttachUAM"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uamId" type="java.lang.String"/>
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="queueName" type="java.lang.String"/>
+      <param name="submitter" type="java.lang.String"/>
+      <param name="appNameSuffix" type="java.lang.String"/>
+      <param name="uamToken" type="org.apache.hadoop.security.token.Token"/>
+      <param name="rmName" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Re-attach to an existing UAM, using the provided uamIdentifier.
+
+ @param uamId uam Id
+ @param conf configuration for this UAM
+ @param appId application id for the UAM
+ @param queueName queue of the application
+ @param submitter submitter name of the UAM
+ @param appNameSuffix application name suffix for the UAM
+ @param uamToken UAM token
+ @param rmName name of the YarnRM
+ @throws YarnException if fails
+ @throws IOException if fails]]>
+      </doc>
+    </method>
+    <method name="createUAM" return="org.apache.hadoop.yarn.server.uam.UnmanagedApplicationManager"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="queueName" type="java.lang.String"/>
+      <param name="submitter" type="java.lang.String"/>
+      <param name="appNameSuffix" type="java.lang.String"/>
+      <param name="keepContainersAcrossApplicationAttempts" type="boolean"/>
+      <param name="rmName" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Creates the UAM instance. Pull out to make unit test easy.
+
+ @param conf Configuration
+ @param appId application id
+ @param queueName queue of the application
+ @param submitter submitter name of the application
+ @param appNameSuffix application name suffix
+ @param keepContainersAcrossApplicationAttempts keep container flag for UAM
+ @param rmName name of the YarnRM
+ @return the UAM instance]]>
+      </doc>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uamId" type="java.lang.String"/>
+      <param name="registerRequest" type="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Register application master for the UAM.
+
+ @param uamId uam Id
+ @param registerRequest RegisterApplicationMasterRequest
+ @return register response
+ @throws YarnException if register fails
+ @throws IOException if register fails]]>
+      </doc>
+    </method>
+    <method name="allocateAsync"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uamId" type="java.lang.String"/>
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"/>
+      <param name="callback" type="org.apache.hadoop.yarn.util.AsyncCallback"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[AllocateAsync to an UAM.
+
+ @param uamId uam Id
+ @param request AllocateRequest
+ @param callback callback for response
+ @throws YarnException if allocate fails
+ @throws IOException if allocate fails]]>
+      </doc>
+    </method>
+    <method name="finishApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uamId" type="java.lang.String"/>
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Finish an UAM/application.
+
+ @param uamId uam Id
+ @param request FinishApplicationMasterRequest
+ @return FinishApplicationMasterResponse
+ @throws YarnException if finishApplicationMaster call fails
+ @throws IOException if finishApplicationMaster call fails]]>
+      </doc>
+    </method>
+    <method name="shutDownConnections"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uamId" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Shutdown an UAM client without killing it in YarnRM.
+
+ @param uamId uam Id
+ @throws YarnException if fails]]>
+      </doc>
+    </method>
+    <method name="shutDownConnections"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Shutdown all UAM clients without killing them in YarnRM.
+
+ @throws YarnException if fails]]>
+      </doc>
+    </method>
+    <method name="getAllUAMIds" return="java.util.Set"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Get the id of all running UAMs.
+
+ @return uamId set]]>
+      </doc>
+    </method>
+    <method name="hasUAMId" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uamId" type="java.lang.String"/>
+      <doc>
+      <![CDATA[Return whether an UAM exists.
+
+ @param uamId uam Id
+ @return UAM exists or not]]>
+      </doc>
+    </method>
+    <method name="getAMRMClientRelayer" return="org.apache.hadoop.yarn.server.AMRMClientRelayer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uamId" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Return the rmProxy relayer of an UAM.
+
+ @param uamId uam Id
+ @return the rmProxy relayer
+ @throws YarnException if fails]]>
+      </doc>
+    </method>
+    <method name="getRequestQueueSize" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uamId" type="java.lang.String"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+    </method>
+    <method name="drainUAMHeartbeats"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="LOG" type="org.slf4j.Logger"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[A service that manages a pool of UAM managers in
+ {@link UnmanagedApplicationManager}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.uam.UnmanagedAMPoolManager -->
+  <!-- start class org.apache.hadoop.yarn.server.uam.UnmanagedApplicationManager -->
+  <class name="UnmanagedApplicationManager" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UnmanagedApplicationManager" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.yarn.api.records.ApplicationId, java.lang.String, java.lang.String, java.lang.String, boolean, java.lang.String"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Constructor.
+
+ @param conf configuration
+ @param appId application Id to use for this UAM
+ @param queueName the queue of the UAM
+ @param submitter user name of the app
+ @param appNameSuffix the app name suffix to use
+ @param rmName name of the YarnRM
+ @param keepContainersAcrossApplicationAttempts keep container flag for UAM
+          recovery. See {@link ApplicationSubmissionContext
+          #setKeepContainersAcrossApplicationAttempts(boolean)}]]>
+      </doc>
+    </constructor>
+    <method name="createAMHeartbeatRequestHandler" return="org.apache.hadoop.yarn.server.AMHeartbeatRequestHandler"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <param name="relayer" type="org.apache.hadoop.yarn.server.AMRMClientRelayer"/>
+    </method>
+    <method name="launchUAM" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Launch a new UAM in the resource manager.
+
+ @return identifier uam identifier
+ @throws YarnException if fails
+ @throws IOException if fails]]>
+      </doc>
+    </method>
+    <method name="reAttachUAM"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="amrmToken" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Re-attach to an existing UAM in the resource manager.
+
+ @param amrmToken the UAM token
+ @throws IOException if re-attach fails
+ @throws YarnException if re-attach fails]]>
+      </doc>
+    </method>
+    <method name="createUAMProxy"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="amrmToken" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+    </method>
+    <method name="registerApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Registers this {@link UnmanagedApplicationManager} with the resource
+ manager.
+
+ @param request RegisterApplicationMasterRequest
+ @return register response
+ @throws YarnException if register fails
+ @throws IOException if register fails]]>
+      </doc>
+    </method>
+    <method name="finishApplicationMaster" return="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Unregisters from the resource manager and stops the request handler thread.
+
+ @param request the finishApplicationMaster request
+ @return the response
+ @throws YarnException if finishAM call fails
+ @throws IOException if finishAM call fails]]>
+      </doc>
+    </method>
+    <method name="forceKillApplication" return="org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Force kill the UAM.
+
+ @return kill response
+ @throws IOException if fails to create rmProxy
+ @throws YarnException if force kill fails]]>
+      </doc>
+    </method>
+    <method name="allocateAsync"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="request" type="org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest"/>
+      <param name="callback" type="org.apache.hadoop.yarn.util.AsyncCallback"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Sends the specified heart beat request to the resource manager and invokes
+ the callback asynchronously with the response.
+
+ @param request the allocate request
+ @param callback the callback method for the request
+ @throws YarnException if registerAM is not called yet]]>
+      </doc>
+    </method>
+    <method name="shutDownConnections"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Shutdown this UAM client, without killing the UAM in the YarnRM side.]]>
+      </doc>
+    </method>
+    <method name="getAppId" return="org.apache.hadoop.yarn.api.records.ApplicationId"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the application id of the UAM.
+
+ @return application id of the UAM]]>
+      </doc>
+    </method>
+    <method name="getAMRMClientRelayer" return="org.apache.hadoop.yarn.server.AMRMClientRelayer"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Returns the rmProxy relayer of this UAM.
+
+ @return rmProxy relayer of the UAM]]>
+      </doc>
+    </method>
+    <method name="createRMProxy" return="T"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="protocol" type="java.lang.Class"/>
+      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="user" type="org.apache.hadoop.security.UserGroupInformation"/>
+      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Returns RM proxy for the specified protocol type. Unit test cases can
+ override this method and return mock proxy instances.
+
+ @param protocol protocol of the proxy
+ @param config configuration
+ @param user ugi for the proxy connection
+ @param token token for the connection
+ @param <T> type of the proxy
+ @return the proxy instance
+ @throws IOException if fails to create the proxy]]>
+      </doc>
+    </method>
+    <method name="initializeUnmanagedAM" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <param name="appId" type="org.apache.hadoop.yarn.api.records.ApplicationId"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Launch and initialize an unmanaged AM. First, it creates a new application
+ on the RM and negotiates a new attempt id. Then it waits for the RM
+ application attempt state to reach YarnApplicationAttemptState.LAUNCHED
+ after which it returns the AM-RM token.
+
+ @param appId application id
+ @return the UAM token
+ @throws IOException if initialize fails
+ @throws YarnException if initialize fails]]>
+      </doc>
+    </method>
+    <method name="getUAMToken" return="org.apache.hadoop.security.token.Token"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="YarnException" type="org.apache.hadoop.yarn.exceptions.YarnException"/>
+      <doc>
+      <![CDATA[Gets the amrmToken of the unmanaged AM.
+
+ @return the amrmToken of the unmanaged AM.
+ @throws IOException if getApplicationReport fails
+ @throws YarnException if getApplicationReport fails]]>
+      </doc>
+    </method>
+    <method name="getRequestQueueSize" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="drainHeartbeatThread"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <method name="isHeartbeatThreadAlive" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </method>
+    <field name="APP_NAME" type="java.lang.String"
+      transient="false" volatile="false"
+      static="true" final="true" visibility="public"
+      deprecated="not deprecated">
+    </field>
+    <doc>
+    <![CDATA[UnmanagedApplicationManager is used to register unmanaged application and
+ negotiate for resources from resource managers. An unmanagedAM is an AM that
+ is not launched and managed by the RM. Allocate calls are handled
+ asynchronously using {@link AsyncCallback}.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.uam.UnmanagedApplicationManager -->
+</package>
+<package name="org.apache.hadoop.yarn.server.utils">
+  <!-- start class org.apache.hadoop.yarn.server.utils.LeveldbIterator -->
+  <class name="LeveldbIterator" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <implements name="java.util.Iterator"/>
+    <implements name="java.io.Closeable"/>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DB"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator for the specified database]]>
+      </doc>
+    </constructor>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DB, org.iq80.leveldb.ReadOptions"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator for the specified database]]>
+      </doc>
+    </constructor>
+    <constructor name="LeveldbIterator" type="org.iq80.leveldb.DBIterator"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Create an iterator using the specified underlying DBIterator]]>
+      </doc>
+    </constructor>
+    <method name="seek"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="key" type="byte[]"/>
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so the key of the next BlockElement
+ returned is greater than or equal to the specified targetKey.]]>
+      </doc>
+    </method>
+    <method name="seekToFirst"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so it is at the beginning of the Database.]]>
+      </doc>
+    </method>
+    <method name="seekToLast"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Repositions the iterator so it is at the end of the Database.]]>
+      </doc>
+    </method>
+    <method name="hasNext" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns <tt>true</tt> if the iteration has more elements.]]>
+      </doc>
+    </method>
+    <method name="next" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns the next element in the iteration.]]>
+      </doc>
+    </method>
+    <method name="peekNext" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Returns the next element in the iteration, without advancing the
+ iteration.]]>
+      </doc>
+    </method>
+    <method name="hasPrev" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return true if there is a previous entry in the iteration.]]>
+      </doc>
+    </method>
+    <method name="prev" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return the previous element in the iteration and rewinds the iteration.]]>
+      </doc>
+    </method>
+    <method name="peekPrev" return="java.util.Map.Entry"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[@return the previous element in the iteration, without rewinding the
+ iteration.]]>
+      </doc>
+    </method>
+    <method name="remove"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="DBException" type="org.iq80.leveldb.DBException"/>
+      <doc>
+      <![CDATA[Removes from the database the last element returned by the iterator.]]>
+      </doc>
+    </method>
+    <method name="close"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Closes the iterator.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[A wrapper for a DBIterator to translate the raw RuntimeExceptions that
+ can be thrown into DBExceptions.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.utils.LeveldbIterator -->
+</package>
+<package name="org.apache.hadoop.yarn.server.webapp">
+</package>
+<package name="org.apache.hadoop.yarn.server.webapp.dao">
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo -->
+  <class name="AppAttemptInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppAttemptInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AppAttemptInfo" type="org.apache.hadoop.yarn.api.records.ApplicationAttemptReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAppAttemptId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRpcPort" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getOriginalTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppAttemptState" return="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAmContainerId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="appAttemptId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="host" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rpcPort" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="trackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="originalTrackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="diagnosticsInfo" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="appAttemptState" type="org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="amContainerId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="startedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finishedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo -->
+  <class name="AppAttemptsInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppAttemptsInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="info" type="org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo"/>
+    </method>
+    <method name="getAttempts" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="attempt" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppAttemptsInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppInfo -->
+  <class name="AppInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="AppInfo" type="org.apache.hadoop.yarn.api.records.ApplicationReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getAppId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getCurrentAppAttemptId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getUser" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getName" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getQueue" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getType" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getHost" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRpcPort" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppState" return="org.apache.hadoop.yarn.api.records.YarnApplicationState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getRunningContainers" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedCpuVcores" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedMemoryMB" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedGpus" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getReservedCpuVcores" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getReservedMemoryMB" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getReservedGpus" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getProgress" return="float"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getOriginalTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getTrackingUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinalAppStatus" return="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getSubmittedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLaunchTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getElapsedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getApplicationTags" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="isUnmanagedApp" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAppNodeLabelExpression" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAmNodeLabelExpression" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAggregateResourceAllocation" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAggregatePreemptedResourceAllocation" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="appId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="currentAppAttemptId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="user" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="name" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="queue" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="type" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="host" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="rpcPort" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="appState" type="org.apache.hadoop.yarn.api.records.YarnApplicationState"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="runningContainers" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="progress" type="float"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="diagnosticsInfo" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="originalTrackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="trackingUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finalAppStatus" type="org.apache.hadoop.yarn.api.records.FinalApplicationStatus"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="startedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finishedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="elapsedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="applicationTags" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="priority" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="unmanagedApplication" type="boolean"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.AppsInfo -->
+  <class name="AppsInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="AppsInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="appinfo" type="org.apache.hadoop.yarn.server.webapp.dao.AppInfo"/>
+    </method>
+    <method name="getApps" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="app" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.AppsInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo -->
+  <class name="ContainerInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainerInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="ContainerInfo" type="org.apache.hadoop.yarn.api.records.ContainerReport"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getContainerId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedMB" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedVCores" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAssignedNodeId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getPriority" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getStartedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getFinishedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getElapsedTime" return="long"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getDiagnosticsInfo" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getLogUrl" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerExitStatus" return="int"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getContainerState" return="org.apache.hadoop.yarn.api.records.ContainerState"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodeHttpAddress" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getNodeId" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="getAllocatedResources" return="java.util.Map"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <doc>
+      <![CDATA[Return a map of the allocated resources. The map key is the resource name,
+ and the value is the resource value.
+
+ @return the allocated resources map]]>
+      </doc>
+    </method>
+    <method name="getExposedPorts" return="java.lang.String"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <method name="hasCustomResources" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="containerId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="allocatedMB" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="allocatedVCores" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="assignedNodeId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="priority" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="startedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="finishedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="elapsedTime" type="long"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="diagnosticsInfo" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="logUrl" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="containerExitStatus" type="int"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="containerState" type="org.apache.hadoop.yarn.api.records.ContainerState"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="nodeHttpAddress" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="nodeId" type="java.lang.String"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+    <field name="allocatedResources" type="java.util.Map"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo -->
+  <!-- start class org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo -->
+  <class name="ContainersInfo" extends="java.lang.Object"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="ContainersInfo"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="add"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="containerInfo" type="org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo"/>
+    </method>
+    <method name="getContainers" return="java.util.ArrayList"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <field name="container" type="java.util.ArrayList"
+      transient="false" volatile="false"
+      static="false" final="false" visibility="protected"
+      deprecated="not deprecated">
+    </field>
+  </class>
+  <!-- end class org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo -->
+</package>
+
+</api>