| <?xml version="1.0" encoding="iso-8859-1" standalone="no"?> |
| <!-- Generated by the JDiff Javadoc doclet --> |
| <!-- (http://www.jdiff.org) --> |
| <!-- on Tue Jun 10 22:31:49 UTC 2008 --> |
| |
| <api |
| xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' |
| xsi:noNamespaceSchemaLocation='api.xsd' |
| name="hadoop 0.17.0" |
| jdversion="1.1.0"> |
| |
| <!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/oom/tools/src/jdiff-1.1.0-src/jdiff.jar:/home/oom/tools/src/jdiff-1.1.0-src/lib/xerces.jar -classpath /home/oom/work/eclipse/hadoop-17/lib/commons-cli-2.0-SNAPSHOT.jar:/home/oom/work/eclipse/hadoop-17/lib/commons-codec-1.3.jar:/home/oom/work/eclipse/hadoop-17/lib/commons-httpclient-3.0.1.jar:/home/oom/work/eclipse/hadoop-17/lib/commons-logging-1.0.4.jar:/home/oom/work/eclipse/hadoop-17/lib/commons-logging-api-1.0.4.jar:/home/oom/work/eclipse/hadoop-17/lib/jets3t-0.5.0.jar:/home/oom/work/eclipse/hadoop-17/lib/jetty-5.1.4.jar:/home/oom/work/eclipse/hadoop-17/lib/jetty-ext/commons-el.jar:/home/oom/work/eclipse/hadoop-17/lib/jetty-ext/jasper-compiler.jar:/home/oom/work/eclipse/hadoop-17/lib/jetty-ext/jasper-runtime.jar:/home/oom/work/eclipse/hadoop-17/lib/jetty-ext/jsp-api.jar:/home/oom/work/eclipse/hadoop-17/lib/junit-3.8.1.jar:/home/oom/work/eclipse/hadoop-17/lib/kfs-0.1.jar:/home/oom/work/eclipse/hadoop-17/lib/log4j-1.2.13.jar:/home/oom/work/eclipse/hadoop-17/lib/servlet-api.jar:/home/oom/work/eclipse/hadoop-17/lib/xmlenc-0.52.jar:/home/oom/work/eclipse/hadoop-17/conf:/usr/releng/share/java/ant/1.6.5/lib/ant-launcher.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-antlr.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-apache-bcel.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-apache-bsf.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-apache-log4j.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-apache-oro.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-apache-regexp.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-apache-resolver.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-commons-logging.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-commons-net.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-icontract.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-jai.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-javamail.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-jdepend.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-jmf.jar:/usr/releng/shar
e/java/ant/1.6.5/lib/ant-jsch.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-junit.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-netrexx.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-nodeps.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-starteam.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-stylebook.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-swing.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-trax.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-vaj.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-weblogic.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-xalan1.jar:/usr/releng/share/java/ant/1.6.5/lib/ant-xslp.jar:/usr/releng/share/java/ant/1.6.5/lib/ant.jar:/usr/releng/share/java/ant/1.6.5/lib/xercesImpl.jar:/usr/releng/share/java/ant/1.6.5/lib/xml-apis.jar:/usr/releng/share/java/ant/1.6.5/lib/junit-3.8.1.jar:/nfs/ystools/vol/ystools/releng/build/Linux_2.6_rh4_x86_64/tools/java/jdk1.6.0_i586/lib/tools.jar -sourcepath /home/oom/work/eclipse/hadoop-17/src/java -apidir /home/oom/work/eclipse/hadoop-17/build -apiname hadoop 0.17.0 --> |
| <package name="org.apache.hadoop"> |
| <!-- start class org.apache.hadoop.HadoopVersionAnnotation --> |
| <class name="HadoopVersionAnnotation" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.lang.annotation.Annotation"/> |
| <doc> |
| <![CDATA[A package attribute that captures the version of Hadoop that was compiled.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.HadoopVersionAnnotation --> |
| </package> |
| <package name="org.apache.hadoop.conf"> |
| <!-- start interface org.apache.hadoop.conf.Configurable --> |
| <interface name="Configurable" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Set the configuration to be used by this object.]]> |
| </doc> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the configuration used by this object.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Something that may be configured with a {@link Configuration}.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.conf.Configurable --> |
| <!-- start class org.apache.hadoop.conf.Configuration --> |
| <class name="Configuration" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
<implements name="java.lang.Iterable&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"/>
| <constructor name="Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[A new configuration.]]> |
| </doc> |
| </constructor> |
| <constructor name="Configuration" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[A new configuration with the same settings cloned from another. |
| |
| @param other the configuration from which to clone settings.]]> |
| </doc> |
| </constructor> |
| <method name="addResource" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Add a configuration resource. |
| |
| The properties of this resource will override properties of previously |
| added resources, unless they were marked <a href="#Final">final</a>. |
| |
| @param name resource to be added, the classpath is examined for a file |
| with that name.]]> |
| </doc> |
| </method> |
| <method name="addResource" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="url" type="java.net.URL"/> |
| <doc> |
| <![CDATA[Add a configuration resource. |
| |
| The properties of this resource will override properties of previously |
| added resources, unless they were marked <a href="#Final">final</a>. |
| |
| @param url url of the resource to be added, the local filesystem is |
| examined directly to find the resource, without referring to |
| the classpath.]]> |
| </doc> |
| </method> |
| <method name="addResource" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="file" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Add a configuration resource. |
| |
| The properties of this resource will override properties of previously |
| added resources, unless they were marked <a href="#Final">final</a>. |
| |
| @param file file-path of resource to be added, the local filesystem is |
| examined directly to find the resource, without referring to |
| the classpath.]]> |
| </doc> |
| </method> |
| <method name="get" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get the value of the <code>name</code> property, <code>null</code> if |
| no such property exists. |
| |
| Values are processed for <a href="#VariableExpansion">variable expansion</a> |
| before being returned. |
| |
| @param name the property name. |
| @return the value of the <code>name</code> property, |
| or null if no such property exists.]]> |
| </doc> |
| </method> |
| <method name="getRaw" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get the value of the <code>name</code> property, without doing |
| <a href="#VariableExpansion">variable expansion</a>. |
| |
| @param name the property name. |
| @return the value of the <code>name</code> property, |
| or null if no such property exists.]]> |
| </doc> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="value" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the <code>value</code> of the <code>name</code> property. |
| |
| @param name property name. |
| @param value property value.]]> |
| </doc> |
| </method> |
| <method name="get" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="defaultValue" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get the value of the <code>name</code> property. If no such property |
| exists, then <code>defaultValue</code> is returned. |
| |
| @param name property name. |
| @param defaultValue default value. |
| @return property value, or <code>defaultValue</code> if the property |
| doesn't exist.]]> |
| </doc> |
| </method> |
| <method name="getInt" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="defaultValue" type="int"/> |
| <doc> |
| <![CDATA[Get the value of the <code>name</code> property as an <code>int</code>. |
| |
| If no such property exists, or if the specified value is not a valid |
| <code>int</code>, then <code>defaultValue</code> is returned. |
| |
| @param name property name. |
| @param defaultValue default value. |
| @return property value as an <code>int</code>, |
| or <code>defaultValue</code>.]]> |
| </doc> |
| </method> |
| <method name="setInt" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="value" type="int"/> |
| <doc> |
| <![CDATA[Set the value of the <code>name</code> property to an <code>int</code>. |
| |
| @param name property name. |
| @param value <code>int</code> value of the property.]]> |
| </doc> |
| </method> |
| <method name="getLong" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="defaultValue" type="long"/> |
| <doc> |
| <![CDATA[Get the value of the <code>name</code> property as a <code>long</code>. |
| If no such property is specified, or if the specified value is not a valid |
| <code>long</code>, then <code>defaultValue</code> is returned. |
| |
| @param name property name. |
| @param defaultValue default value. |
| @return property value as a <code>long</code>, |
| or <code>defaultValue</code>.]]> |
| </doc> |
| </method> |
| <method name="setLong" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="value" type="long"/> |
| <doc> |
| <![CDATA[Set the value of the <code>name</code> property to a <code>long</code>. |
| |
| @param name property name. |
| @param value <code>long</code> value of the property.]]> |
| </doc> |
| </method> |
| <method name="getFloat" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="defaultValue" type="float"/> |
| <doc> |
| <![CDATA[Get the value of the <code>name</code> property as a <code>float</code>. |
| If no such property is specified, or if the specified value is not a valid |
| <code>float</code>, then <code>defaultValue</code> is returned. |
| |
| @param name property name. |
| @param defaultValue default value. |
| @return property value as a <code>float</code>, |
| or <code>defaultValue</code>.]]> |
| </doc> |
| </method> |
| <method name="getBoolean" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="defaultValue" type="boolean"/> |
| <doc> |
| <![CDATA[Get the value of the <code>name</code> property as a <code>boolean</code>. |
| If no such property is specified, or if the specified value is not a valid |
| <code>boolean</code>, then <code>defaultValue</code> is returned. |
| |
| @param name property name. |
| @param defaultValue default value. |
| @return property value as a <code>boolean</code>, |
| or <code>defaultValue</code>.]]> |
| </doc> |
| </method> |
| <method name="setBoolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="value" type="boolean"/> |
| <doc> |
| <![CDATA[Set the value of the <code>name</code> property to a <code>boolean</code>. |
| |
| @param name property name. |
| @param value <code>boolean</code> value of the property.]]> |
| </doc> |
| </method> |
| <method name="getRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="defaultValue" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Parse the given attribute as a set of integer ranges |
| @param name the attribute name |
| @param defaultValue the default value if it is not set |
| @return a new set of ranges from the configured value]]> |
| </doc> |
| </method> |
| <method name="getStrings" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get the comma delimited values of the <code>name</code> property as |
| an array of <code>String</code>s. |
| If no such property is specified then <code>null</code> is returned. |
| |
| @param name property name. |
| @return property value as an array of <code>String</code>s, |
| or <code>null</code>.]]> |
| </doc> |
| </method> |
| <method name="getStrings" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="defaultValue" type="java.lang.String[]"/> |
| <doc> |
| <![CDATA[Get the comma delimited values of the <code>name</code> property as |
| an array of <code>String</code>s. |
| If no such property is specified then default value is returned. |
| |
| @param name property name. |
| @param defaultValue The default value |
| @return property value as an array of <code>String</code>s, |
| or default value.]]> |
| </doc> |
| </method> |
| <method name="setStrings" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="values" type="java.lang.String[]"/> |
| <doc> |
| <![CDATA[Set the array of string values for the <code>name</code> property as |
comma delimited values.
| |
| @param name property name. |
| @param values The values]]> |
| </doc> |
| </method> |
<method name="getClassByName" return="java.lang.Class&lt;?&gt;"
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/> |
| <doc> |
| <![CDATA[Load a class by name. |
| |
| @param name the class name. |
| @return the class object. |
| @throws ClassNotFoundException if the class is not found.]]> |
| </doc> |
| </method> |
<method name="getClass" return="java.lang.Class&lt;?&gt;"
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
<param name="defaultValue" type="java.lang.Class&lt;?&gt;"/>
| <doc> |
| <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code>. |
| If no such property is specified, then <code>defaultValue</code> is |
| returned. |
| |
| @param name the class name. |
| @param defaultValue default value. |
| @return property value as a <code>Class</code>, |
| or <code>defaultValue</code>.]]> |
| </doc> |
| </method> |
<method name="getClass" return="java.lang.Class&lt;? extends U&gt;"
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
<param name="defaultValue" type="java.lang.Class&lt;? extends U&gt;"/>
<param name="xface" type="java.lang.Class&lt;U&gt;"/>
| <doc> |
| <![CDATA[Get the value of the <code>name</code> property as a <code>Class</code> |
| implementing the interface specified by <code>xface</code>. |
| |
| If no such property is specified, then <code>defaultValue</code> is |
| returned. |
| |
| An exception is thrown if the returned class does not implement the named |
| interface. |
| |
| @param name the class name. |
| @param defaultValue default value. |
| @param xface the interface implemented by the named class. |
| @return property value as a <code>Class</code>, |
| or <code>defaultValue</code>.]]> |
| </doc> |
| </method> |
| <method name="setClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
<param name="theClass" type="java.lang.Class&lt;?&gt;"/>
<param name="xface" type="java.lang.Class&lt;?&gt;"/>
| <doc> |
| <![CDATA[Set the value of the <code>name</code> property to the name of a |
| <code>theClass</code> implementing the given interface <code>xface</code>. |
| |
| An exception is thrown if <code>theClass</code> does not implement the |
| interface <code>xface</code>. |
| |
| @param name property name. |
| @param theClass property value. |
| @param xface the interface implemented by the named class.]]> |
| </doc> |
| </method> |
| <method name="getLocalPath" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dirsProp" type="java.lang.String"/> |
| <param name="path" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get a local file under a directory named by <i>dirsProp</i> with |
| the given <i>path</i>. If <i>dirsProp</i> contains multiple directories, |
| then one is chosen based on <i>path</i>'s hash code. If the selected |
| directory does not exist, an attempt is made to create it. |
| |
| @param dirsProp directory in which to locate the file. |
| @param path file-path. |
| @return local file under the directory with the given path.]]> |
| </doc> |
| </method> |
| <method name="getFile" return="java.io.File" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dirsProp" type="java.lang.String"/> |
| <param name="path" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get a local file name under a directory named in <i>dirsProp</i> with |
| the given <i>path</i>. If <i>dirsProp</i> contains multiple directories, |
| then one is chosen based on <i>path</i>'s hash code. If the selected |
| directory does not exist, an attempt is made to create it. |
| |
| @param dirsProp directory in which to locate the file. |
| @param path file-path. |
| @return local file under the directory with the given path.]]> |
| </doc> |
| </method> |
| <method name="getResource" return="java.net.URL" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get the {@link URL} for the named resource. |
| |
| @param name resource name. |
| @return the url for the named resource.]]> |
| </doc> |
| </method> |
| <method name="getConfResourceAsInputStream" return="java.io.InputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get an input stream attached to the configuration resource with the |
| given <code>name</code>. |
| |
| @param name configuration resource name. |
| @return an input stream attached to the resource.]]> |
| </doc> |
| </method> |
| <method name="getConfResourceAsReader" return="java.io.Reader" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get a {@link Reader} attached to the configuration resource with the |
| given <code>name</code>. |
| |
| @param name configuration resource name. |
| @return a reader attached to the resource.]]> |
| </doc> |
| </method> |
<method name="iterator" return="java.util.Iterator&lt;java.util.Map.Entry&lt;java.lang.String, java.lang.String&gt;&gt;"
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get an {@link Iterator} to go through the list of <code>String</code> |
| key-value pairs in the configuration. |
| |
| @return an iterator over the entries.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.OutputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
<![CDATA[Write out the non-default properties in this configuration to the given
| {@link OutputStream}. |
| |
| @param out the output stream to write to.]]> |
| </doc> |
| </method> |
| <method name="getClassLoader" return="java.lang.ClassLoader" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the {@link ClassLoader} for this job. |
| |
| @return the correct class loader.]]> |
| </doc> |
| </method> |
| <method name="setClassLoader" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="classLoader" type="java.lang.ClassLoader"/> |
| <doc> |
| <![CDATA[Set the class loader that will be used to load the various objects. |
| |
| @param classLoader the new class loader.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setQuietMode" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="quietmode" type="boolean"/> |
| <doc> |
<![CDATA[Set the quietness-mode.

In the quiet-mode error and informational messages might not be logged.
| |
| @param quietmode <code>true</code> to set quiet-mode on, <code>false</code> |
| to turn it off.]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[For debugging. List non-default properties to the terminal and exit.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Provides access to configuration parameters. |
| |
| <h4 id="Resources">Resources</h4> |
| |
| <p>Configurations are specified by resources. A resource contains a set of |
| name/value pairs as XML data. Each resource is named by either a |
| <code>String</code> or by a {@link Path}. If named by a <code>String</code>, |
| then the classpath is examined for a file with that name. If named by a |
| <code>Path</code>, then the local filesystem is examined directly, without |
| referring to the classpath. |
| |
| <p>Hadoop by default specifies two resources, loaded in-order from the |
| classpath: <ol> |
| <li><tt><a href="{@docRoot}/../hadoop-default.html">hadoop-default.xml</a> |
| </tt>: Read-only defaults for hadoop.</li> |
| <li><tt>hadoop-site.xml</tt>: Site-specific configuration for a given hadoop |
| installation.</li> |
| </ol> |
| Applications may add additional resources, which are loaded |
| subsequent to these resources in the order they are added. |
| |
| <h4 id="FinalParams">Final Parameters</h4> |
| |
| <p>Configuration parameters may be declared <i>final</i>. |
| Once a resource declares a value final, no subsequently-loaded |
| resource can alter that value. |
| For example, one might define a final parameter with: |
| <tt><pre> |
| <property> |
| <name>dfs.client.buffer.dir</name> |
| <value>/tmp/hadoop/dfs/client</value> |
| <b><final>true</final></b> |
| </property></pre></tt> |
| |
| Administrators typically define parameters as final in |
| <tt>hadoop-site.xml</tt> for values that user applications may not alter. |
| |
| <h4 id="VariableExpansion">Variable Expansion</h4> |
| |
| <p>Value strings are first processed for <i>variable expansion</i>. The |
| available properties are:<ol> |
| <li>Other properties defined in this Configuration; and, if a name is |
| undefined here,</li> |
| <li>Properties in {@link System#getProperties()}.</li> |
| </ol> |
| |
| <p>For example, if a configuration resource contains the following property |
| definitions: |
| <tt><pre> |
| <property> |
| <name>basedir</name> |
| <value>/user/${<i>user.name</i>}</value> |
| </property> |
| |
| <property> |
| <name>tempdir</name> |
| <value>${<i>basedir</i>}/tmp</value> |
| </property></pre></tt> |
| |
| When <tt>conf.get("tempdir")</tt> is called, then <tt>${<i>basedir</i>}</tt> |
| will be resolved to another property in this Configuration, while |
| <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value |
| of the System property with that name.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.conf.Configuration --> |
| <!-- start class org.apache.hadoop.conf.Configuration.IntegerRanges --> |
| <class name="Configuration.IntegerRanges" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Configuration.IntegerRanges" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="Configuration.IntegerRanges" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="isIncluded" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="int"/> |
| <doc> |
| <![CDATA[Is the given value in the set of ranges |
| @param value the value to check |
| @return is the value in the ranges?]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A class that represents a set of positive integer ranges. It parses |
| strings of the form: "2-3,5,7-" where ranges are separated by comma and |
| the lower/upper bounds are separated by dash. Either the lower or upper |
| bound may be omitted meaning all values up to or over. So the string |
| above means 2, 3, 5, and 7, 8, 9, ...]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.conf.Configuration.IntegerRanges --> |
| <!-- start class org.apache.hadoop.conf.Configured --> |
| <class name="Configured" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.conf.Configurable"/> |
| <constructor name="Configured" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a Configured.]]> |
| </doc> |
| </constructor> |
| <constructor name="Configured" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a Configured.]]> |
| </doc> |
| </constructor> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[Base class for things that may be configured with a {@link Configuration}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.conf.Configured --> |
| <doc> |
| <![CDATA[Configuration of system parameters.]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.dfs"> |
| <!-- start class org.apache.hadoop.dfs.AlreadyBeingCreatedException --> |
| <class name="AlreadyBeingCreatedException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="AlreadyBeingCreatedException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[The exception that happens when you ask to create a file that already |
| is being created, but is not closed yet.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.AlreadyBeingCreatedException --> |
| <!-- start class org.apache.hadoop.dfs.Balancer --> |
| <class name="Balancer" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.util.Tool"/> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <doc> |
| <![CDATA[Run a balancer |
| @param args]]> |
| </doc> |
| </method> |
| <method name="run" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[main method of Balancer |
| @param args arguments to a Balancer |
| @exception Exception if any exception occurs during datanode balancing]]> |
| </doc> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[return this balancer's configuration]]> |
| </doc> |
| </method> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[set this balancer's configuration]]> |
| </doc> |
| </method> |
| <field name="SUCCESS" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="ALREADY_RUNNING" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="NO_MOVE_BLOCK" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="NO_MOVE_PROGRESS" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="IO_EXCEPTION" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="ILLEGAL_ARGS" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[<p>The balancer is a tool that balances disk space usage on an HDFS cluster |
| when some datanodes become full or when new empty nodes join the cluster. |
| The tool is deployed as an application program that can be run by the |
| cluster administrator on a live HDFS cluster while applications |
| are adding and deleting files. |
| |
| <p>SYNOPSIS |
| <pre> |
| To start: |
| bin/start-balancer.sh [-threshold <threshold>] |
| Example: bin/start-balancer.sh |
| start the balancer with a default threshold of 10% |
| bin/start-balancer.sh -threshold 5 |
| start the balancer with a threshold of 5% |
| To stop: |
| bin/stop-balancer.sh |
| </pre> |
| |
| <p>DESCRIPTION |
| <p>The threshold parameter is a fraction in the range of (0%, 100%) with a |
| default value of 10%. The threshold sets a target for whether the cluster |
| is balanced. A cluster is balanced if for each datanode, the utilization |
| of the node (ratio of used space at the node to total capacity of the node) |
| differs from the utilization of the cluster (ratio of used space in the cluster |
| to total capacity of the cluster) by no more than the threshold value. |
| The smaller the threshold, the more balanced a cluster will become. |
| It takes more time to run the balancer for small threshold values. |
| Also for a very small threshold the cluster may not be able to reach the |
| balanced state when applications write and delete files concurrently. |
| |
| <p>The tool moves blocks from highly utilized datanodes to poorly |
| utilized datanodes iteratively. In each iteration a datanode moves or |
| receives no more than the lesser of 10G bytes or the threshold fraction |
| of its capacity. Each iteration runs no more than 20 minutes. |
| At the end of each iteration, the balancer obtains updated datanodes |
| information from the namenode. |
| |
| <p>A system property that limits the balancer's use of bandwidth is |
| defined in the default configuration file: |
| <pre> |
| <property> |
| <name>dfs.balance.bandwidthPerSec</name> |
| <value>1048576</value> |
| <description> Specifies the maximum bandwidth that each datanode |
| can utilize for the balancing purpose in term of the number of bytes |
| per second. </description> |
| </property> |
| </pre> |
| |
| <p>This property determines the maximum speed at which a block will be |
| moved from one datanode to another. The default value is 1MB/s. The higher |
| the bandwidth, the faster a cluster can reach the balanced state, |
| but with greater competition with application processes. If an |
| administrator changes the value of this property in the configuration |
| file, the change is observed when HDFS is next restarted. |
| |
| <p>MONITORING BALANCER PROGRESS |
| <p>After the balancer is started, an output file name where the balancer |
| progress will be recorded is printed on the screen. The administrator |
| can monitor the running of the balancer by reading the output file. |
| The output shows the balancer's status iteration by iteration. In each |
| iteration it prints the starting time, the iteration number, the total |
| number of bytes that have been moved in the previous iterations, |
| the total number of bytes that are left to move in order for the cluster |
| to be balanced, and the number of bytes that are being moved in this |
| iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left |
| To Move" is decreasing. |
| |
| <p>Running multiple instances of the balancer in an HDFS cluster is |
| prohibited by the tool. |
| |
| <p>The balancer automatically exits when any of the following five |
| conditions is satisfied: |
| <ol> |
| <li>The cluster is balanced; |
| <li>No block can be moved; |
| <li>No block has been moved for five consecutive iterations; |
| <li>An IOException occurs while communicating with the namenode; |
| <li>Another balancer is running. |
| </ol> |
| |
| <p>Upon exit, a balancer returns an exit code and prints one of the |
| following messages to the output file corresponding to the above exit |
| reasons: |
| <ol> |
| <li>The cluster is balanced. Exiting |
| <li>No block can be moved. Exiting... |
| <li>No block has been moved for 3 iterations. Exiting... |
| <li>Received an IO exception: failure reason. Exiting... |
| <li>Another balancer is running. Exiting... |
| </ol> |
| |
| <p>The administrator can interrupt the execution of the balancer at any |
| time by running the command "stop-balancer.sh" on the machine where the |
| balancer is running.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.Balancer --> |
| <!-- start class org.apache.hadoop.dfs.ChecksumDistributedFileSystem --> |
| <class name="ChecksumDistributedFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ChecksumDistributedFileSystem" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="ChecksumDistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="deprecated, no comment"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@deprecated]]> |
| </doc> |
| </constructor> |
| <method name="getContentLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getRawCapacity" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the total raw capacity of the filesystem, disregarding |
| replication.]]> |
| </doc> |
| </method> |
| <method name="getRawUsed" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the total raw used space in the filesystem, disregarding |
| replication.]]> |
| </doc> |
| </method> |
| <method name="getDataNodeStats" return="org.apache.hadoop.dfs.DatanodeInfo[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return statistics for each datanode.]]> |
| </doc> |
| </method> |
| <method name="setSafeMode" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="action" type="org.apache.hadoop.dfs.FSConstants.SafeModeAction"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Enter, leave or get safe mode. |
| |
| @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]> |
| </doc> |
| </method> |
| <method name="refreshNodes" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="finalizeUpgrade" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Finalize previously upgraded files system state.]]> |
| </doc> |
| </method> |
| <method name="distributedUpgradeProgress" return="org.apache.hadoop.dfs.UpgradeStatusReport" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="action" type="org.apache.hadoop.dfs.FSConstants.UpgradeAction"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="metaSave" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pathname" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="reportChecksumFailure" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/> |
| <param name="inPos" type="long"/> |
| <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/> |
| <param name="sumsPos" type="long"/> |
| <doc> |
| <![CDATA[We need to find the blocks that didn't match. Likely only one |
| is corrupt but we will report both to the namenode. In the future, |
| we can consider figuring out exactly which block is corrupt.]]> |
| </doc> |
| </method> |
| <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the stat information about the file.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[An implementation of ChecksumFileSystem over DistributedFileSystem. |
| Note that as of now (May 07), DistributedFileSystem natively checksums |
| all of its data. Using this class is not necessary in most cases. |
| Currently provided mainly for backward compatibility and testing.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.ChecksumDistributedFileSystem --> |
| <!-- start class org.apache.hadoop.dfs.DataBlockScanner --> |
| <class name="DataBlockScanner" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.lang.Runnable"/> |
| <method name="run" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.DataBlockScanner --> |
| <!-- start class org.apache.hadoop.dfs.DataBlockScanner.Servlet --> |
| <class name="DataBlockScanner.Servlet" extends="javax.servlet.http.HttpServlet" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DataBlockScanner.Servlet" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doGet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="response" type="javax.servlet.http.HttpServletResponse"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.DataBlockScanner.Servlet --> |
| <!-- start class org.apache.hadoop.dfs.DataChecksum --> |
| <class name="DataChecksum" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.util.zip.Checksum"/> |
| <method name="newDataChecksum" return="org.apache.hadoop.dfs.DataChecksum" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="type" type="int"/> |
| <param name="bytesPerChecksum" type="int"/> |
| </method> |
| <method name="newDataChecksum" return="org.apache.hadoop.dfs.DataChecksum" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <doc> |
| <![CDATA[Creates a DataChecksum from HEADER_LEN bytes from arr[offset]. |
| @return DataChecksum of the type in the array or null in case of an error.]]> |
| </doc> |
| </method> |
| <method name="newDataChecksum" return="org.apache.hadoop.dfs.DataChecksum" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[This constructs a DataChecksum by reading HEADER_LEN bytes from |
| input stream <i>in</i>]]> |
| </doc> |
| </method> |
| <method name="writeHeader" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Writes the checksum header to the output stream <i>out</i>.]]> |
| </doc> |
| </method> |
| <method name="getHeader" return="byte[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="writeValue" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutputStream"/> |
| <param name="reset" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Writes the current checksum to the stream. |
| If <i>reset</i> is true, then resets the checksum. |
| @return number of bytes written. Will be equal to getChecksumSize();]]> |
| </doc> |
| </method> |
| <method name="writeValue" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="buf" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="reset" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Writes the current checksum to a buffer. |
| If <i>reset</i> is true, then resets the checksum. |
| @return number of bytes written. Will be equal to getChecksumSize();]]> |
| </doc> |
| </method> |
| <method name="compare" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="buf" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <doc> |
| <![CDATA[Compares the checksum located at buf[offset] with the current checksum. |
| @return true if the checksum matches and false otherwise.]]> |
| </doc> |
| </method> |
| <method name="getChecksumType" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getChecksumSize" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getBytesPerChecksum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getNumBytesInSum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getChecksumHeaderSize" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getValue" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="update" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| </method> |
| <method name="update" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="int"/> |
| </method> |
| <field name="HEADER_LEN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="CHECKSUM_NULL" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="CHECKSUM_CRC32" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[This class provides an interface and utilities for processing checksums for |
| DFS data transfers.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.DataChecksum --> |
| <!-- start class org.apache.hadoop.dfs.DataNode --> |
| <class name="DataNode" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.dfs.FSConstants"/> |
| <implements name="java.lang.Runnable"/> |
| <method name="createSocketAddr" return="java.net.InetSocketAddress" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="target" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Use {@link NetUtils#createSocketAddr(String)} instead.]]> |
| </doc> |
| </method> |
| <method name="getDataNode" return="org.apache.hadoop.dfs.DataNode" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the DataNode object]]> |
| </doc> |
| </method> |
| <method name="getNameNodeAddr" return="java.net.InetSocketAddress" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getSelfAddr" return="java.net.InetSocketAddress" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getNamenode" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the namenode's identifier]]> |
| </doc> |
| </method> |
| <method name="shutdown" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Shut down this instance of the datanode. |
| Returns only after shutdown is complete.]]> |
| </doc> |
| </method> |
| <method name="offerService" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[Main loop for the DataNode. Runs until shutdown, |
| forever calling remote NameNode functions.]]> |
| </doc> |
| </method> |
| <method name="run" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[No matter what kind of exception we get, keep retrying to offerService(). |
| That's the loop that connects to the NameNode and provides basic DataNode |
| functionality. |
| |
| Only stop when "shouldRun" is turned off (which can only happen at shutdown).]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="scheduleBlockReport" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delay" type="long"/> |
| <doc> |
| <![CDATA[This methods arranges for the data node to send the block report at the next heartbeat.]]> |
| </doc> |
| </method> |
| <method name="getFSDataset" return="org.apache.hadoop.dfs.FSDatasetInterface" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[This method is used for testing. |
| Examples are adding and deleting blocks directly. |
| The most common usage will be when the data node's storage is simulated. |
| |
| @return the fsdataset that stores the blocks]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[DataNode is a class (and program) that stores a set of |
| blocks for a DFS deployment. A single deployment can |
| have one or many DataNodes. Each DataNode communicates |
| regularly with a single NameNode. It also communicates |
| with client code and other DataNodes from time to time. |
| |
| DataNodes store a series of named blocks. The DataNode |
| allows client code to read these blocks, or to write new |
| block data. The DataNode may also, in response to instructions |
| from its NameNode, delete blocks or copy blocks to/from other |
| DataNodes. |
| |
| The DataNode maintains just one critical table: |
| block-> stream of bytes (of BLOCK_SIZE or less) |
| |
| This info is stored on a local disk. The DataNode |
| reports the table's contents to the NameNode upon startup |
| and every so often afterwards. |
| |
| DataNodes spend their lives in an endless loop of asking |
| the NameNode for something to do. A NameNode cannot connect |
| to a DataNode directly; a NameNode simply returns values from |
| functions invoked by a DataNode. |
| |
| DataNodes maintain an open server socket so that client code |
| or other DataNodes can read/write data. The host/port for |
| this server is reported to the NameNode, which then sends that |
| information to clients or other DataNodes that might be interested.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.DataNode --> |
| <!-- start class org.apache.hadoop.dfs.DatanodeDescriptor --> |
| <class name="DatanodeDescriptor" extends="org.apache.hadoop.dfs.DatanodeInfo" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DatanodeDescriptor" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Default constructor]]> |
| </doc> |
| </constructor> |
| <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[DatanodeDescriptor constructor |
| @param nodeID id of the data node]]> |
| </doc> |
| </constructor> |
| <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[DatanodeDescriptor constructor |
| |
| @param nodeID id of the data node |
| @param networkLocation location of the data node in network]]> |
| </doc> |
| </constructor> |
| <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID, java.lang.String, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[DatanodeDescriptor constructor |
| |
| @param nodeID id of the data node |
| @param networkLocation location of the data node in network |
| @param hostName it could be different from host specified for DatanodeID]]> |
| </doc> |
| </constructor> |
| <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID, long, long, long, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[DatanodeDescriptor constructor |
| |
| @param nodeID id of the data node |
| @param capacity capacity of the data node |
| @param dfsUsed space used by the data node |
| @param remaining remaining capacity of the data node |
| @param xceiverCount # of data transfers at the data node]]> |
| </doc> |
| </constructor> |
| <constructor name="DatanodeDescriptor" type="org.apache.hadoop.dfs.DatanodeID, java.lang.String, java.lang.String, long, long, long, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[DatanodeDescriptor constructor |
| |
| @param nodeID id of the data node |
| @param networkLocation location of the data node in network |
| @param capacity capacity of the data node, including space used by non-dfs |
| @param dfsUsed the used space by dfs datanode |
| @param remaining remaining capacity of the data node |
| @param xceiverCount # of data transfers at the data node]]> |
| </doc> |
| </constructor> |
| <field name="isAlive" type="boolean" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[DatanodeDescriptor tracks stats on a given DataNode, |
| such as available storage capacity, last update time, etc., |
| and maintains a set of blocks stored on the datanode. |
| |
| This is a data structure that is internal |
| to the namenode. It is *not* sent over-the-wire to the Client |
| or the Datanodes. Neither is it stored persistently in the |
| fsImage.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.DatanodeDescriptor --> |
| <!-- start class org.apache.hadoop.dfs.DatanodeID --> |
| <class name="DatanodeID" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <constructor name="DatanodeID" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[DatanodeID default constructor]]> |
| </doc> |
| </constructor> |
| <constructor name="DatanodeID" type="org.apache.hadoop.dfs.DatanodeID" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[DatanodeID copy constructor |
| |
| @param from]]> |
| </doc> |
| </constructor> |
| <constructor name="DatanodeID" type="java.lang.String, java.lang.String, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create DatanodeID |
| |
| @param nodeName (hostname:portNumber) |
| @param storageID data storage ID]]> |
| </doc> |
| </constructor> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return hostname:portNumber.]]> |
| </doc> |
| </method> |
| <method name="getStorageID" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return data storage ID.]]> |
| </doc> |
| </method> |
| <method name="getInfoPort" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return infoPort (the port to which the HTTP server is bound)]]> |
| </doc> |
| </method> |
| <method name="getHost" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return hostname only, without the :portNumber suffix.]]> |
| </doc> |
| </method> |
| <method name="getPort" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="to" type="java.lang.Object"/> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Comparable. |
| Basis of compare is the String name (host:portNumber) only. |
| @param o |
| @return as specified by Comparable.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="name" type="java.lang.String" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="storageID" type="java.lang.String" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="infoPort" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[DatanodeID is composed of the data node |
| name (hostname:portNumber) and the data storage ID, |
| which it currently represents.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.DatanodeID --> |
| <!-- start class org.apache.hadoop.dfs.DatanodeInfo --> |
| <class name="DatanodeInfo" extends="org.apache.hadoop.dfs.DatanodeID" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.net.Node"/> |
| <method name="getCapacity" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The raw capacity.]]> |
| </doc> |
| </method> |
| <method name="getDfsUsed" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The used space by the data node.]]> |
| </doc> |
| </method> |
| <method name="getRemaining" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The raw free space.]]> |
| </doc> |
| </method> |
| <method name="getLastUpdate" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The time when this information was accurate.]]> |
| </doc> |
| </method> |
| <method name="getXceiverCount" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[number of active connections]]> |
| </doc> |
| </method> |
| <method name="getNetworkLocation" return="java.lang.String" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[rack name]]> |
| </doc> |
| </method> |
| <method name="setNetworkLocation" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="location" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Sets the rack name]]> |
| </doc> |
| </method> |
| <method name="getHostName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setHostName" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="host" type="java.lang.String"/> |
| </method> |
| <method name="getDatanodeReport" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[A formatted string for reporting the status of the DataNode.]]> |
| </doc> |
| </method> |
| <method name="getParent" return="org.apache.hadoop.net.Node" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return this node's parent]]> |
| </doc> |
| </method> |
| <method name="setParent" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="parent" type="org.apache.hadoop.net.Node"/> |
| </method> |
| <method name="getLevel" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return this node's level in the tree. |
| E.g. the root of a tree returns 0 and its children return 1]]> |
| </doc> |
| </method> |
| <method name="setLevel" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="level" type="int"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="capacity" type="long" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="dfsUsed" type="long" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="remaining" type="long" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="lastUpdate" type="long" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="xceiverCount" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="adminState" type="org.apache.hadoop.dfs.DatanodeInfo.AdminStates" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[DatanodeInfo represents the status of a DataNode. |
| This object is used for communication in the |
| Datanode Protocol and the Client Protocol.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.DatanodeInfo --> |
| <!-- start class org.apache.hadoop.dfs.DatanodeInfo.AdminStates --> |
| <class name="DatanodeInfo.AdminStates" extends="java.lang.Enum<org.apache.hadoop.dfs.DatanodeInfo.AdminStates>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.dfs.DatanodeInfo.AdminStates[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.dfs.DatanodeInfo.AdminStates" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.DatanodeInfo.AdminStates --> |
| <!-- start class org.apache.hadoop.dfs.DFSAdmin --> |
| <class name="DFSAdmin" extends="org.apache.hadoop.fs.FsShell" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DFSAdmin" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a DFSAdmin object.]]> |
| </doc> |
| </constructor> |
| <constructor name="DFSAdmin" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a DFSAdmin object.]]> |
| </doc> |
| </constructor> |
| <method name="report" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Gives a report on how the FileSystem is doing. |
| @exception IOException if the filesystem does not exist.]]> |
| </doc> |
| </method> |
| <method name="setSafeMode" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <param name="idx" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Safe mode maintenance command. |
| Usage: java DFSAdmin -safemode [enter | leave | get] |
| @param argv List of command line parameters. |
| @param idx The index of the command that is being processed. |
| @exception IOException if the filesystem does not exist.]]> |
| </doc> |
| </method> |
| <method name="refreshNodes" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Command to ask the namenode to reread the hosts and excluded hosts |
| file. |
| Usage: java DFSAdmin -refreshNodes |
| @exception IOException]]> |
| </doc> |
| </method> |
| <method name="finalizeUpgrade" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Command to ask the namenode to finalize previously performed upgrade. |
| Usage: java DFSAdmin -finalizeUpgrade |
| @exception IOException]]> |
| </doc> |
| </method> |
| <method name="upgradeProgress" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <param name="idx" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Command to request current distributed upgrade status, |
| a detailed status, or to force the upgrade to proceed. |
| |
| Usage: java DFSAdmin -upgradeProgress [status | details | force] |
| @exception IOException]]> |
| </doc> |
| </method> |
| <method name="metaSave" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <param name="idx" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Dumps DFS data structures into specified file. |
| Usage: java DFSAdmin -metasave filename |
| @param argv List of command line parameters. |
| @param idx The index of the command that is being processed. |
| @exception IOException if an error occurred while accessing |
| the file or path.]]> |
| </doc> |
| </method> |
| <method name="printUsage" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="cmd" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Displays format of commands. |
| @param cmd The command that is being executed.]]> |
| </doc> |
| </method> |
| <method name="run" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[@param argv The parameters passed to this program. |
| @exception Exception if the filesystem does not exist. |
| @return 0 on success, non zero on error.]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[main() has some simple utility methods. |
| @param argv Command line parameters. |
| @exception Exception if the filesystem does not exist.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class provides some DFS administrative access.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.DFSAdmin --> |
| <!-- start class org.apache.hadoop.dfs.DFSck --> |
| <class name="DFSck" extends="org.apache.hadoop.conf.Configured" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.util.Tool"/> |
| <constructor name="DFSck" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[Filesystem checker. |
| @param conf current Configuration |
| @throws Exception]]> |
| </doc> |
| </constructor> |
| <method name="run" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[@param args]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| </method> |
| <doc> |
| <![CDATA[This class provides rudimentary checking of DFS volumes for errors and |
| sub-optimal conditions. |
| <p>The tool scans all files and directories, starting from an indicated |
| root path. The following abnormal conditions are detected and handled:</p> |
| <ul> |
| <li>files with blocks that are completely missing from all datanodes.<br/> |
| In this case the tool can perform one of the following actions: |
| <ul> |
| <li>none ({@link NamenodeFsck#FIXING_NONE})</li> |
| <li>move corrupted files to /lost+found directory on DFS |
| ({@link NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as |
| block chains, representing the longest consecutive series of valid blocks.</li> |
| <li>delete corrupted files ({@link NamenodeFsck#FIXING_DELETE})</li> |
| </ul> |
| </li> |
| <li>detect files with under-replicated or over-replicated blocks</li> |
| </ul> |
| Additionally, the tool collects detailed overall DFS statistics, and |
| optionally can print detailed statistics on block locations and replication |
| factors of each file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.DFSck --> |
| <!-- start class org.apache.hadoop.dfs.DistributedFileSystem --> |
| <class name="DistributedFileSystem" extends="org.apache.hadoop.fs.FileSystem" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DistributedFileSystem" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="DistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="deprecated, no comment"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@deprecated]]> |
| </doc> |
| </constructor> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="deprecated, no comment"> |
| <doc> |
| <![CDATA[@deprecated]]> |
| </doc> |
| </method> |
| <method name="getUri" return="java.net.URI" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="initialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="uri" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getDefaultBlockSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getDefaultReplication" return="short" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setWorkingDirectory" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dir" type="org.apache.hadoop.fs.Path"/> |
| </method> |
| <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="start" type="long"/> |
| <param name="len" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="setVerifyChecksum" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="verifyChecksum" type="boolean"/> |
| </method> |
| <method name="open" return="org.apache.hadoop.fs.FSDataInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="bufferSize" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="setReplication" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="replication" type="short"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="rename" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Rename files/dirs]]> |
| </doc> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get rid of Path f, whether a true file or dir.]]> |
| </doc> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="recursive" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[requires a boolean check to delete a non-empty |
| directory recursively.]]> |
| </doc> |
| </method> |
| <method name="getContentLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="mkdirs" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getDiskStatus" return="org.apache.hadoop.dfs.DistributedFileSystem.DiskStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the disk usage of the filesystem, including total capacity, |
| used space, and remaining space]]> |
| </doc> |
| </method> |
| <method name="getRawCapacity" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the total raw capacity of the filesystem, disregarding |
| replication.]]> |
| </doc> |
| </method> |
| <method name="getRawUsed" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the total raw used space in the filesystem, disregarding |
| replication.]]> |
| </doc> |
| </method> |
| <method name="getDataNodeStats" return="org.apache.hadoop.dfs.DatanodeInfo[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return statistics for each datanode.]]> |
| </doc> |
| </method> |
| <method name="setSafeMode" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="action" type="org.apache.hadoop.dfs.FSConstants.SafeModeAction"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Enter, leave or get safe mode. |
| |
| @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode( |
| FSConstants.SafeModeAction)]]> |
| </doc> |
| </method> |
| <method name="refreshNodes" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="finalizeUpgrade" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Finalize previously upgraded file system state. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="distributedUpgradeProgress" return="org.apache.hadoop.dfs.UpgradeStatusReport" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="action" type="org.apache.hadoop.dfs.FSConstants.UpgradeAction"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="metaSave" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pathname" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="reportChecksumFailure" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/> |
| <param name="inPos" type="long"/> |
| <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/> |
| <param name="sumsPos" type="long"/> |
| <doc> |
| <![CDATA[We need to find the blocks that didn't match. Likely only one |
| is corrupt but we will report both to the namenode. In the future, |
| we can consider figuring out exactly which block is corrupt.]]> |
| </doc> |
| </method> |
| <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the stat information about the file. |
| @throws FileNotFoundException if the file does not exist.]]> |
| </doc> |
| </method> |
| <method name="setPermission" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc }]]> |
| </doc> |
| </method> |
| <method name="setOwner" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <param name="username" type="java.lang.String"/> |
| <param name="groupname" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc }]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Implementation of the abstract FileSystem for the DFS system. |
| This object is the way end-user code interacts with a Hadoop |
| DistributedFileSystem.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.DistributedFileSystem --> |
| <!-- start class org.apache.hadoop.dfs.DistributedFileSystem.DiskStatus --> |
| <class name="DistributedFileSystem.DiskStatus" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DistributedFileSystem.DiskStatus" type="long, long, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getCapacity" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getDfsUsed" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getRemaining" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.DistributedFileSystem.DiskStatus --> |
| <!-- start class org.apache.hadoop.dfs.FileDataServlet --> |
| <class name="FileDataServlet" extends="org.apache.hadoop.dfs.DfsServlet" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="FileDataServlet" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doGet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="response" type="javax.servlet.http.HttpServletResponse"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Service a GET request as described below. |
| Request: |
| {@code |
| GET http://<nn>:<port>/data[/<path>] HTTP/1.1 |
| }]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Redirect queries about the hosted filesystem to an appropriate datanode. |
| @see org.apache.hadoop.dfs.HftpFileSystem]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.FileDataServlet --> |
| <!-- start class org.apache.hadoop.dfs.FsckServlet --> |
| <class name="FsckServlet" extends="javax.servlet.http.HttpServlet" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="FsckServlet" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doGet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="response" type="javax.servlet.http.HttpServletResponse"/> |
| <exception name="ServletException" type="javax.servlet.ServletException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[This class is used in Namesystem's jetty to do fsck on namenode.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.FsckServlet --> |
| <!-- start interface org.apache.hadoop.dfs.FSConstants --> |
| <interface name="FSConstants" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <field name="MIN_BLOCKS_FOR_WRITE" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_ERROR" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_HEARTBEAT" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_BLOCKRECEIVED" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_BLOCKREPORT" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_TRANSFERDATA" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_OPEN" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_STARTFILE" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_ADDBLOCK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_RENAMETO" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_DELETE" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_COMPLETEFILE" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_LISTING" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_OBTAINLOCK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_RELEASELOCK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_EXISTS" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_ISDIR" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_MKDIRS" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_RENEW_LEASE" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_ABANDONBLOCK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_RAWSTATS" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_DATANODEREPORT" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_DATANODE_HINTS" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_TRANSFERBLOCKS" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_INVALIDATE_BLOCKS" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_FAILURE" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_OPEN_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_STARTFILE_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_ADDBLOCK_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_RENAMETO_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_DELETE_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_COMPLETEFILE_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_TRYAGAIN" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_LISTING_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_OBTAINLOCK_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_RELEASELOCK_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_EXISTS_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_ISDIR_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_MKDIRS_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_RENEW_LEASE_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_ABANDONBLOCK_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_RAWSTATS_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_DATANODEREPORT_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_CLIENT_DATANODE_HINTS_ACK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_WRITE_BLOCK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_READ_BLOCK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_READ_METADATA" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_REPLACE_BLOCK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_COPY_BLOCK" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_STATUS_SUCCESS" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_STATUS_ERROR" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_STATUS_ERROR_CHECKSUM" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_STATUS_ERROR_INVALID" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_STATUS_ERROR_EXISTS" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="OP_STATUS_CHECKSUM_OK" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DATA_TRANSFER_VERSION" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Version for data transfers between clients and datanodes |
| This should change when the serialization of DatanodeInfo changes, |
| not just when the protocol changes. It is not always obvious.]]> |
| </doc> |
| </field> |
| <field name="OPERATION_FAILED" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="STILL_WAITING" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="COMPLETE_SUCCESS" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="BLOCK_INVALIDATE_CHUNK" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="HEARTBEAT_INTERVAL" type="long" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="BLOCKREPORT_INTERVAL" type="long" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="BLOCKREPORT_INITIAL_DELAY" type="long" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LEASE_SOFTLIMIT_PERIOD" type="long" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LEASE_HARDLIMIT_PERIOD" type="long" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="READ_TIMEOUT" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="WRITE_TIMEOUT" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="WRITE_TIMEOUT_EXTENSION" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="MAX_PATH_LENGTH" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="MAX_PATH_DEPTH" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="BUFFER_SIZE" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="SMALL_BUFFER_SIZE" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DEFAULT_BLOCK_SIZE" type="long" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DEFAULT_DATA_SOCKET_SIZE" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="SIZE_OF_INTEGER" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LAYOUT_VERSION" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Some handy constants]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.dfs.FSConstants --> |
| <!-- start class org.apache.hadoop.dfs.FSConstants.CheckpointStates --> |
| <class name="FSConstants.CheckpointStates" extends="java.lang.Enum<org.apache.hadoop.dfs.FSConstants.CheckpointStates>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.dfs.FSConstants.CheckpointStates[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.CheckpointStates" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.FSConstants.CheckpointStates --> |
| <!-- start class org.apache.hadoop.dfs.FSConstants.DatanodeReportType --> |
| <class name="FSConstants.DatanodeReportType" extends="java.lang.Enum<org.apache.hadoop.dfs.FSConstants.DatanodeReportType>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.dfs.FSConstants.DatanodeReportType[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.DatanodeReportType" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.FSConstants.DatanodeReportType --> |
| <!-- start class org.apache.hadoop.dfs.FSConstants.NodeType --> |
| <class name="FSConstants.NodeType" extends="java.lang.Enum<org.apache.hadoop.dfs.FSConstants.NodeType>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.dfs.FSConstants.NodeType[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.NodeType" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <doc> |
| <![CDATA[Type of the node]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.FSConstants.NodeType --> |
| <!-- start class org.apache.hadoop.dfs.FSConstants.SafeModeAction --> |
| <class name="FSConstants.SafeModeAction" extends="java.lang.Enum<org.apache.hadoop.dfs.FSConstants.SafeModeAction>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.dfs.FSConstants.SafeModeAction[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.SafeModeAction" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.FSConstants.SafeModeAction --> |
| <!-- start class org.apache.hadoop.dfs.FSConstants.StartupOption --> |
| <class name="FSConstants.StartupOption" extends="java.lang.Enum<org.apache.hadoop.dfs.FSConstants.StartupOption>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.dfs.FSConstants.StartupOption[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.StartupOption" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.FSConstants.StartupOption --> |
| <!-- start class org.apache.hadoop.dfs.FSConstants.UpgradeAction --> |
| <class name="FSConstants.UpgradeAction" extends="java.lang.Enum<org.apache.hadoop.dfs.FSConstants.UpgradeAction>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.dfs.FSConstants.UpgradeAction[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.dfs.FSConstants.UpgradeAction" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <doc> |
| <![CDATA[Distributed upgrade actions: |
| |
| 1. Get upgrade status. |
| 2. Get detailed upgrade status. |
| 3. Proceed with the upgrade if it is stuck, no matter what the status is.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.FSConstants.UpgradeAction --> |
| <!-- start interface org.apache.hadoop.dfs.FSDatasetInterface --> |
| <interface name="FSDatasetInterface" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean"/> |
| <method name="getMetaDataLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the length of the metadata file of the specified block |
| @param b - the block for which the metadata length is desired |
| @return the length of the metadata file for the specified block. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getMetaDataInputStream" return="org.apache.hadoop.dfs.FSDatasetInterface.MetaDataInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns metaData of block b as an input stream (and its length) |
| @param b - the block |
| @return the metadata input stream; |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="metaFileExists" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Does the meta file exist for this block? |
| @param b - the block |
| @return true if the metafile for the specified block exists |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the specified block's on-disk length (excluding metadata) |
| @param b |
| @return the specified block's on-disk length (excluding metadata) |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getBlockInputStream" return="java.io.InputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns an input stream to read the contents of the specified block |
| @param b |
| @return an input stream to read the contents of the specified block |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getBlockInputStream" return="java.io.InputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <param name="seekOffset" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns an input stream at specified offset of the specified block |
| @param b |
| @param seekOffset |
| @return an input stream to read the contents of the specified block, |
| starting at the offset |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="writeToBlock" return="org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <param name="isRecovery" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Creates the block and returns output streams to write data and CRC |
| @param b |
| @param isRecovery True if this is part of error recovery, otherwise false |
| @return a BlockWriteStreams object to allow writing the block data |
| and CRC |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="finalizeBlock" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Finalizes the block previously opened for writing using writeToBlock. |
| The block size is what is in the parameter b and it must match the amount |
| of data written |
| @param b |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="unfinalizeBlock" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Unfinalizes the block previously opened for writing using writeToBlock. |
| The temporary file associated with this block is deleted. |
| @param b |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getBlockReport" return="org.apache.hadoop.dfs.Block[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the block report - the full list of blocks stored |
| @return - the block report - the full list of blocks stored]]> |
| </doc> |
| </method> |
| <method name="isValidBlock" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <doc> |
| <![CDATA[Is the block valid? |
| @param b |
| @return - true if the specified block is valid]]> |
| </doc> |
| </method> |
| <method name="invalidate" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="invalidBlks" type="org.apache.hadoop.dfs.Block[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Invalidates the specified blocks |
| @param invalidBlks - the blocks to be invalidated |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="checkDataDir" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/> |
| <doc> |
| <![CDATA[Check if all the data directories are healthy |
| @throws DiskErrorException]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Stringifies the name of the storage]]> |
| </doc> |
| </method> |
| <method name="shutdown" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Shutdown the FSDataset]]> |
| </doc> |
| </method> |
| <method name="getChannelPosition" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <param name="stream" type="org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the current offset in the data stream. |
| @param b |
| @param stream The stream to the data file and checksum file |
| @return the position of the file pointer in the data stream |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="setChannelPosition" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <param name="stream" type="org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams"/> |
| <param name="dataOffset" type="long"/> |
| <param name="ckOffset" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Sets the file pointer of the data stream and checksum stream to |
| the specified values. |
| @param b |
| @param stream The stream for the data file and checksum file |
| @param dataOffset The position to which the file pointer for the data stream |
| should be set |
| @param ckOffset The position to which the file pointer for the checksum stream |
| should be set |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This is an interface for the underlying storage that stores blocks for |
| a data node. |
| Examples are the FSDataset (which stores blocks on dirs) and |
| SimulatedFSDataset (which simulates data).]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.dfs.FSDatasetInterface --> |
| <!-- start class org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams --> |
| <class name="FSDatasetInterface.BlockWriteStreams" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[This class contains the output streams for the data and checksum |
| of a block]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.FSDatasetInterface.BlockWriteStreams --> |
| <!-- start class org.apache.hadoop.dfs.FSDatasetInterface.MetaDataInputStream --> |
| <class name="FSDatasetInterface.MetaDataInputStream" extends="java.io.FilterInputStream" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[This class provides the input stream and length of the metadata |
| of a block]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.FSDatasetInterface.MetaDataInputStream --> |
| <!-- start class org.apache.hadoop.dfs.GetImageServlet --> |
| <class name="GetImageServlet" extends="javax.servlet.http.HttpServlet" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="GetImageServlet" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doGet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="response" type="javax.servlet.http.HttpServletResponse"/> |
| <exception name="ServletException" type="javax.servlet.ServletException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[This class is used in Namesystem's jetty to retrieve a file. |
| Typically used by the Secondary NameNode to retrieve image and |
| edit file for periodic checkpointing.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.GetImageServlet --> |
| <!-- start class org.apache.hadoop.dfs.HftpFileSystem --> |
| <class name="HftpFileSystem" extends="org.apache.hadoop.fs.FileSystem" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="HftpFileSystem" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="initialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getUri" return="java.net.URI" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="openConnection" return="java.net.HttpURLConnection" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="path" type="java.lang.String"/> |
| <param name="query" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Open an HTTP connection to the namenode to read file data and metadata. |
| @param path The path component of the URL |
| @param query The query component of the URL]]> |
| </doc> |
| </method> |
| <method name="open" return="org.apache.hadoop.fs.FSDataInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="buffersize" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setWorkingDirectory" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="rename" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="recursive" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="mkdirs" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="nnAddr" type="java.net.InetSocketAddress" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="ugi" type="org.apache.hadoop.security.UserGroupInformation" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="df" type="java.text.SimpleDateFormat" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[An implementation of a protocol for accessing filesystems over HTTP. |
| The following implementation provides a limited, read-only interface |
| to a filesystem over HTTP. |
| @see org.apache.hadoop.dfs.ListPathsServlet |
| @see org.apache.hadoop.dfs.FileDataServlet]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.HftpFileSystem --> |
| <!-- start class org.apache.hadoop.dfs.HsftpFileSystem --> |
| <class name="HsftpFileSystem" extends="org.apache.hadoop.dfs.HftpFileSystem" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="HsftpFileSystem" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="openConnection" return="java.net.HttpURLConnection" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="path" type="java.lang.String"/> |
| <param name="query" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getUri" return="java.net.URI" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[An implementation of a protocol for accessing filesystems over HTTPS. |
| The following implementation provides a limited, read-only interface |
| to a filesystem over HTTPS. |
| @see org.apache.hadoop.dfs.ListPathsServlet |
| @see org.apache.hadoop.dfs.FileDataServlet]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.HsftpFileSystem --> |
| <!-- start class org.apache.hadoop.dfs.JspHelper --> |
| <class name="JspHelper" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JspHelper" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="randomNode" return="org.apache.hadoop.dfs.DatanodeID" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="bestNode" return="org.apache.hadoop.dfs.DatanodeInfo" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="blk" type="org.apache.hadoop.dfs.LocatedBlock"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="streamBlockInAscii" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="addr" type="java.net.InetSocketAddress"/> |
| <param name="blockId" type="long"/> |
| <param name="blockSize" type="long"/> |
| <param name="offsetIntoBlock" type="long"/> |
| <param name="chunkSizeToView" type="long"/> |
| <param name="out" type="javax.servlet.jsp.JspWriter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="DFSNodesStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="live" type="java.util.ArrayList<org.apache.hadoop.dfs.DatanodeDescriptor>"/> |
| <param name="dead" type="java.util.ArrayList<org.apache.hadoop.dfs.DatanodeDescriptor>"/> |
| </method> |
| <method name="addTableHeader" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="javax.servlet.jsp.JspWriter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="addTableRow" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="javax.servlet.jsp.JspWriter"/> |
| <param name="columns" type="java.lang.String[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="addTableRow" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="javax.servlet.jsp.JspWriter"/> |
| <param name="columns" type="java.lang.String[]"/> |
| <param name="row" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="addTableFooter" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="javax.servlet.jsp.JspWriter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getSafeModeText" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getInodeLimitText" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getUpgradeStatusText" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="sortNodeList" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="nodes" type="java.util.ArrayList<org.apache.hadoop.dfs.DatanodeDescriptor>"/> |
| <param name="field" type="java.lang.String"/> |
| <param name="order" type="java.lang.String"/> |
| </method> |
| <method name="printPathWithLinks" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dir" type="java.lang.String"/> |
| <param name="out" type="javax.servlet.jsp.JspWriter"/> |
| <param name="namenodeInfoPort" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="printGotoForm" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="javax.servlet.jsp.JspWriter"/> |
| <param name="namenodeInfoPort" type="int"/> |
| <param name="file" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createTitle" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="javax.servlet.jsp.JspWriter"/> |
| <param name="req" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="file" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="percentageGraph" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="perc" type="int"/> |
| <param name="width" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="percentageGraph" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="perc" type="float"/> |
| <param name="width" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="WEB_UGI_PROPERTY_NAME" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="conf" type="org.apache.hadoop.conf.Configuration" |
| transient="false" volatile="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="webUGI" type="org.apache.hadoop.security.UnixUserGroupInformation" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.JspHelper --> |
| <!-- start class org.apache.hadoop.dfs.LeaseExpiredException --> |
| <class name="LeaseExpiredException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="LeaseExpiredException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[The lease that was being used to create this file has expired.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.LeaseExpiredException --> |
| <!-- start class org.apache.hadoop.dfs.ListPathsServlet --> |
| <class name="ListPathsServlet" extends="org.apache.hadoop.dfs.DfsServlet" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ListPathsServlet" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="buildRoot" return="java.util.Map<java.lang.String, java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="doc" type="org.znerd.xmlenc.XMLOutputter"/> |
| <doc> |
| <![CDATA[Build a map from the query string, setting values and defaults.]]> |
| </doc> |
| </method> |
| <method name="doGet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="response" type="javax.servlet.http.HttpServletResponse"/> |
| <exception name="ServletException" type="javax.servlet.ServletException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Service a GET request as described below. |
| Request: |
| {@code |
| GET http://<nn>:<port>/listPaths[/<path>][<?option>[&option]*] HTTP/1.1 |
| } |
| |
| Where <i>option</i> (default) in: |
| recursive ("no") |
| filter (".*") |
| exclude ("\..*\.crc") |
| |
| Response: A flat list of files/directories in the following format: |
| {@code |
| <listing path="..." recursive="(yes|no)" filter="..." |
| time="yyyy-MM-dd hh:mm:ss UTC" version="..."> |
| <directory path="..." modified="yyyy-MM-dd hh:mm:ss"/> |
| <file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ" blocksize="..." |
| replication="..." size="..."/> |
| </listing> |
| }]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Obtain meta-information about a filesystem. |
| @see org.apache.hadoop.dfs.HftpFileSystem]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.ListPathsServlet --> |
| <!-- start class org.apache.hadoop.dfs.LocatedBlocks --> |
| <class name="LocatedBlocks" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <method name="getLocatedBlocks" return="java.util.List<org.apache.hadoop.dfs.LocatedBlock>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get located blocks.]]> |
| </doc> |
| </method> |
| <method name="get" return="org.apache.hadoop.dfs.LocatedBlock" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="index" type="int"/> |
| <doc> |
| <![CDATA[Get located block.]]> |
| </doc> |
| </method> |
| <method name="locatedBlockCount" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get number of located blocks.]]> |
| </doc> |
| </method> |
| <method name="getFileLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[Collection of blocks with their locations and the file length.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.LocatedBlocks --> |
| <!-- start class org.apache.hadoop.dfs.NameNode --> |
| <class name="NameNode" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.dfs.ClientProtocol"/> |
| <implements name="org.apache.hadoop.dfs.DatanodeProtocol"/> |
| <implements name="org.apache.hadoop.dfs.NamenodeProtocol"/> |
| <implements name="org.apache.hadoop.dfs.FSConstants"/> |
| <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Start NameNode. |
| <p> |
| The name-node can be started with one of the following startup options: |
| <ul> |
| <li>{@link FSConstants.StartupOption#REGULAR REGULAR} - normal startup</li> |
| <li>{@link FSConstants.StartupOption#FORMAT FORMAT} - format name node</li> |
| <li>{@link FSConstants.StartupOption#UPGRADE UPGRADE} - start the cluster |
| upgrade and create a snapshot of the current file system state</li> |
| <li>{@link FSConstants.StartupOption#ROLLBACK ROLLBACK} - roll the |
| cluster back to the previous state</li> |
| </ul> |
| The option is passed via configuration field: |
| <tt>dfs.namenode.startup</tt> |
| |
| The conf will be modified to reflect the actual ports on which |
| the NameNode is up and running if the user passes the port as |
| <code>zero</code> in the conf. |
| |
| @param conf configuration |
| @throws IOException]]> |
| </doc> |
| </constructor> |
| <constructor name="NameNode" type="java.lang.String, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a NameNode at the specified location and start it. |
| |
| The conf will be modified to reflect the actual ports on which |
| the NameNode is up and running if the user passes the port as |
| <code>zero</code>.]]> |
| </doc> |
| </constructor> |
| <method name="getProtocolVersion" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="protocol" type="java.lang.String"/> |
| <param name="clientVersion" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="format" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Format a new filesystem. Destroys any filesystem that may already |
| exist at this location.]]> |
| </doc> |
| </method> |
| <method name="getNameNodeMetrics" return="org.apache.hadoop.dfs.NameNodeMetrics" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="join" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Wait for service to finish. |
| (Normally, it runs forever.)]]> |
| </doc> |
| </method> |
| <method name="stop" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Stop all NameNode threads and wait for all to finish.]]> |
| </doc> |
| </method> |
| <method name="getBlocks" return="org.apache.hadoop.dfs.BlocksWithLocations" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="datanode" type="org.apache.hadoop.dfs.DatanodeInfo"/> |
| <param name="size" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[return a list of blocks & their locations on <code>datanode</code> whose |
| total size is <code>size</code> |
| |
| @param datanode on which blocks are located |
| @param size total size of blocks]]> |
| </doc> |
| </method> |
| <method name="getBlockLocations" return="org.apache.hadoop.dfs.LocatedBlocks" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="offset" type="long"/> |
| <param name="length" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="create" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <param name="clientName" type="java.lang.String"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="setReplication" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="replication" type="short"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="setPermission" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="permissions" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="setOwner" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="username" type="java.lang.String"/> |
| <param name="groupname" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="addBlock" return="org.apache.hadoop.dfs.LocatedBlock" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="clientName" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="abandonBlock" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="org.apache.hadoop.dfs.Block"/> |
| <param name="src" type="java.lang.String"/> |
| <param name="holder" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The client needs to give up on the block.]]> |
| </doc> |
| </method> |
| <method name="abandonFileInProgress" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="holder" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="complete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="clientName" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="reportBadBlocks" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="blocks" type="org.apache.hadoop.dfs.LocatedBlock[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The client has detected an error on the specified located blocks |
| and is reporting them to the server. For now, the namenode will |
| delete the blocks from the datanodes. In the future we might |
| check the blocks are actually corrupt.]]> |
| </doc> |
| </method> |
| <method name="getPreferredBlockSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="filename" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="rename" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="dst" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="recursive" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="exists" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use getFileInfo(String) instead"> |
| <param name="src" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@deprecated Use getFileInfo(String) instead]]> |
| </doc> |
| </method> |
| <method name="mkdirs" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="renewLease" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="clientName" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getListing" return="org.apache.hadoop.dfs.DFSFileInfo[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getFileInfo" return="org.apache.hadoop.dfs.DFSFileInfo" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the file info for a specific file. |
| @param src The string representation of the path to the file |
| @throws IOException if permission to access file is denied by the system |
| @return object containing information regarding the file |
| or null if file not found]]> |
| </doc> |
| </method> |
| <method name="getStats" return="long[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getDatanodeReport" return="org.apache.hadoop.dfs.DatanodeInfo[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="type" type="org.apache.hadoop.dfs.FSConstants.DatanodeReportType"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="setSafeMode" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="action" type="org.apache.hadoop.dfs.FSConstants.SafeModeAction"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="isInSafeMode" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Is the cluster currently in safe mode?]]> |
| </doc> |
| </method> |
| <method name="refreshNodes" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getEditLogSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the size of the current edit log.]]> |
| </doc> |
| </method> |
| <method name="rollEditLog" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Roll the edit log.]]> |
| </doc> |
| </method> |
| <method name="rollFsImage" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Roll the image]]> |
| </doc> |
| </method> |
| <method name="finalizeUpgrade" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="distributedUpgradeProgress" return="org.apache.hadoop.dfs.UpgradeStatusReport" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="action" type="org.apache.hadoop.dfs.FSConstants.UpgradeAction"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="metaSave" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="filename" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Dumps namenode state into specified file]]> |
| </doc> |
| </method> |
| <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="fsync" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="clientName" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="register" return="org.apache.hadoop.dfs.DatanodeRegistration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="sendHeartbeat" return="org.apache.hadoop.dfs.DatanodeCommand" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/> |
| <param name="capacity" type="long"/> |
| <param name="dfsUsed" type="long"/> |
| <param name="remaining" type="long"/> |
| <param name="xmitsInProgress" type="int"/> |
| <param name="xceiverCount" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[A data node notifies the name node that it is alive. |
| Return a block-oriented command for the datanode to execute. |
| This will be either a transfer or a delete operation.]]> |
| </doc> |
| </method> |
| <method name="blockReport" return="org.apache.hadoop.dfs.DatanodeCommand" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/> |
| <param name="blocks" type="long[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="blockReceived" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/> |
| <param name="blocks" type="org.apache.hadoop.dfs.Block[]"/> |
| <param name="delHints" type="java.lang.String[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="errorReport" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/> |
| <param name="errorCode" type="int"/> |
| <param name="msg" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="versionRequest" return="org.apache.hadoop.dfs.NamespaceInfo" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="processUpgradeCommand" return="org.apache.hadoop.dfs.UpgradeCommand" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="comm" type="org.apache.hadoop.dfs.UpgradeCommand"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="blockCrcUpgradeGetBlockLocations" return="org.apache.hadoop.dfs.BlockCrcInfo" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="block" type="org.apache.hadoop.dfs.Block"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="verifyRequest" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="nodeReg" type="org.apache.hadoop.dfs.DatanodeRegistration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Verify request. |
| |
| Verifies correctness of the datanode version, registration ID, and |
| if the datanode does not need to be shutdown. |
| |
| @param nodeReg data node registration |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="verifyVersion" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="version" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Verify version. |
| |
| @param version |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getFsImageName" return="java.io.File" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the name of the fsImage file]]> |
| </doc> |
| </method> |
| <method name="getFsImageNameCheckpoint" return="java.io.File[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the name of the fsImage file uploaded by periodic |
| checkpointing]]> |
| </doc> |
| </method> |
| <method name="validateCheckpointUpload" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="token" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Validates that this is a valid checkpoint upload request]]> |
| </doc> |
| </method> |
| <method name="checkpointUploadDone" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Indicates that a new checkpoint has been successfully uploaded.]]> |
| </doc> |
| </method> |
| <method name="getFsEditName" return="java.io.File" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the name of the edits file]]> |
| </doc> |
| </method> |
| <method name="getNameNodeAddress" return="java.net.InetSocketAddress" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the address on which the NameNode is listening. |
| @return the address on which the NameNode is listening.]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="stateChangeLog" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[NameNode serves as both directory namespace manager and |
| "inode table" for the Hadoop DFS. There is a single NameNode |
| running in any DFS deployment. (Well, except when there |
| is a second backup/failover NameNode.) |
| |
| The NameNode controls two critical tables: |
| 1) filename->blocksequence (namespace) |
| 2) block->machinelist ("inodes") |
| |
| The first table is stored on disk and is very precious. |
| The second table is rebuilt every time the NameNode comes |
| up. |
| |
| 'NameNode' refers to both this class as well as the 'NameNode server'. |
| The 'FSNamesystem' class actually performs most of the filesystem |
| management. The majority of the 'NameNode' class itself is concerned |
| with exposing the IPC interface to the outside world, plus some |
| configuration management. |
| |
| NameNode implements the ClientProtocol interface, which allows |
| clients to ask for DFS services. ClientProtocol is not |
| designed for direct use by authors of DFS client code. End-users |
| should instead use the org.apache.hadoop.fs.FileSystem class. |
| |
| NameNode also implements the DatanodeProtocol interface, used by |
| DataNode programs that actually store DFS data blocks. These |
| methods are invoked repeatedly and automatically by all the |
| DataNodes in a DFS deployment. |
| |
| NameNode also implements the NamenodeProtocol interface, used by |
| secondary namenodes or rebalancing processes to get partial namenode's |
| state, for example partial blocksMap etc.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.NameNode --> |
| <!-- start class org.apache.hadoop.dfs.NamenodeFsck --> |
| <class name="NamenodeFsck" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="NamenodeFsck" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.dfs.NameNode, java.util.Map<java.lang.String, java.lang.String[]>, javax.servlet.http.HttpServletResponse" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Filesystem checker. |
| @param conf configuration (namenode config) |
| @param nn namenode that this fsck is going to use |
| @param pmap key=value[] map that is passed to the http servlet as url parameters |
| @param response the object into which this servlet writes the url contents |
| @throws IOException]]> |
| </doc> |
| </constructor> |
| <method name="fsck" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Check files on DFS, starting from the indicated path. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="run" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[@param args]]> |
| </doc> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="FIXING_NONE" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Don't attempt any fixing.]]> |
| </doc> |
| </field> |
| <field name="FIXING_MOVE" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Move corrupted files to /lost+found.]]> |
| </doc> |
| </field> |
| <field name="FIXING_DELETE" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Delete corrupted files.]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[This class provides rudimentary checking of DFS volumes for errors and |
| sub-optimal conditions. |
| <p>The tool scans all files and directories, starting from an indicated |
| root path. The following abnormal conditions are detected and handled:</p> |
| <ul> |
| <li>files with blocks that are completely missing from all datanodes.<br/> |
| In this case the tool can perform one of the following actions: |
| <ul> |
| <li>none ({@link #FIXING_NONE})</li> |
| <li>move corrupted files to /lost+found directory on DFS |
| ({@link #FIXING_MOVE}). Remaining data blocks are saved as |
| block chains, representing the longest consecutive series of valid blocks.</li> |
| <li>delete corrupted files ({@link #FIXING_DELETE})</li> |
| </ul> |
| </li> |
| <li>detect files with under-replicated or over-replicated blocks</li> |
| </ul> |
| Additionally, the tool collects detailed overall DFS statistics, and |
| optionally can print detailed statistics on block locations and replication |
| factors of each file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.NamenodeFsck --> |
| <!-- start class org.apache.hadoop.dfs.NamenodeFsck.FsckResult --> |
| <class name="NamenodeFsck.FsckResult" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="NamenodeFsck.FsckResult" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="isHealthy" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[DFS is considered healthy if there are no missing blocks.]]> |
| </doc> |
| </method> |
| <method name="addMissing" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="id" type="java.lang.String"/> |
| <param name="size" type="long"/> |
| <doc> |
| <![CDATA[Add a missing block name, plus its size.]]> |
| </doc> |
| </method> |
| <method name="getMissingIds" return="java.util.ArrayList<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return a list of missing block names (as list of Strings).]]> |
| </doc> |
| </method> |
| <method name="getMissingSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return total size of missing data, in bytes.]]> |
| </doc> |
| </method> |
| <method name="setMissingSize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="missingSize" type="long"/> |
| </method> |
| <method name="getExcessiveReplicas" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the number of over-replicated blocks.]]> |
| </doc> |
| </method> |
| <method name="setExcessiveReplicas" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="overReplicatedBlocks" type="long"/> |
| </method> |
| <method name="getReplicationFactor" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the actual replication factor.]]> |
| </doc> |
| </method> |
| <method name="getMissingReplicas" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the number of under-replicated blocks. Note: missing blocks are not counted here.]]> |
| </doc> |
| </method> |
| <method name="setMissingReplicas" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="underReplicatedBlocks" type="long"/> |
| </method> |
| <method name="getTotalDirs" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return total number of directories encountered during this scan.]]> |
| </doc> |
| </method> |
| <method name="setTotalDirs" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="totalDirs" type="long"/> |
| </method> |
| <method name="getTotalFiles" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return total number of files encountered during this scan.]]> |
| </doc> |
| </method> |
| <method name="setTotalFiles" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="totalFiles" type="long"/> |
| </method> |
| <method name="getTotalSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return total size of scanned data, in bytes.]]> |
| </doc> |
| </method> |
| <method name="setTotalSize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="totalSize" type="long"/> |
| </method> |
| <method name="getReplication" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the intended replication factor, against which the over/under- |
| replicated blocks are counted. Note: this value comes from the current |
| Configuration supplied for the tool, so it may be different from the |
| value in DFS Configuration.]]> |
| </doc> |
| </method> |
| <method name="setReplication" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="replication" type="int"/> |
| </method> |
| <method name="getTotalBlocks" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the total number of blocks in the scanned area.]]> |
| </doc> |
| </method> |
| <method name="setTotalBlocks" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="totalBlocks" type="long"/> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getCorruptFiles" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the number of corrupted files.]]> |
| </doc> |
| </method> |
| <method name="setCorruptFiles" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="corruptFiles" type="long"/> |
| </method> |
| <doc> |
| <![CDATA[FsckResult of checking, plus overall DFS statistics.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.NamenodeFsck.FsckResult --> |
| <!-- start class org.apache.hadoop.dfs.NameNodeMetrics --> |
| <class name="NameNodeMetrics" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.metrics.Updater"/> |
| <method name="shutdown" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="doUpdates" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/> |
| <doc> |
| <![CDATA[Since this object is a registered updater, this method will be called |
| periodically, e.g. every 5 seconds.]]> |
| </doc> |
| </method> |
| <method name="resetAllMinMax" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <field name="numFilesCreated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="numGetBlockLocations" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="numFilesRenamed" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="numFilesListed" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="transactions" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="syncs" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="blockReport" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="safeModeTime" type="org.apache.hadoop.metrics.util.MetricsIntValue" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="fsImageLoadTime" type="org.apache.hadoop.metrics.util.MetricsIntValue" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[This class is for maintaining the various NameNode statistics |
| and publishing them through the metrics interfaces. |
| This also registers the JMX MBean for RPC. |
| <p> |
| This class has a number of metrics variables that are publicly accessible; |
| these variables (objects) have methods to update their values; |
| for example: |
| <p> {@link #syncs}.inc()]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.NameNodeMetrics --> |
| <!-- start class org.apache.hadoop.dfs.NotReplicatedYetException --> |
| <class name="NotReplicatedYetException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="NotReplicatedYetException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[The file has not finished being written to enough datanodes yet.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.NotReplicatedYetException --> |
| <!-- start class org.apache.hadoop.dfs.SafeModeException --> |
| <class name="SafeModeException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SafeModeException" type="java.lang.String, org.apache.hadoop.dfs.FSNamesystem.SafeModeInfo" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[This exception is thrown when the name node is in safe mode. |
| Clients cannot modify the namespace until safe mode is off.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.SafeModeException --> |
| <!-- start class org.apache.hadoop.dfs.SecondaryNameNode --> |
| <class name="SecondaryNameNode" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.dfs.FSConstants"/> |
| <implements name="java.lang.Runnable"/> |
| <constructor name="SecondaryNameNode" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a connection to the primary namenode.]]> |
| </doc> |
| </constructor> |
| <method name="shutdown" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Shut down this instance of the secondary namenode. |
| Returns only after shutdown is complete.]]> |
| </doc> |
| </method> |
| <method name="run" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[main() has some simple utility methods. |
| @param argv Command line parameters. |
| @exception Exception if the filesystem does not exist.]]> |
| </doc> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[The Secondary NameNode is a helper to the primary NameNode. |
| The Secondary is responsible for supporting periodic checkpoints |
| of the HDFS metadata. The current design allows only one Secondary |
| NameNode per HDFS cluster. |
| |
| The Secondary NameNode is a daemon that periodically wakes |
| up (determined by the schedule specified in the configuration), |
| triggers a periodic checkpoint and then goes back to sleep. |
| The Secondary NameNode uses the ClientProtocol to talk to the |
| primary NameNode.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.SecondaryNameNode --> |
| <!-- start class org.apache.hadoop.dfs.SecondaryNameNode.GetImageServlet --> |
| <class name="SecondaryNameNode.GetImageServlet" extends="javax.servlet.http.HttpServlet" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SecondaryNameNode.GetImageServlet" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doGet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="response" type="javax.servlet.http.HttpServletResponse"/> |
| <exception name="ServletException" type="javax.servlet.ServletException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[This class is used in Namesystem's jetty to retrieve a file. |
| Typically used by the Secondary NameNode to retrieve image and |
| edit file for periodic checkpointing.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.SecondaryNameNode.GetImageServlet --> |
| <!-- start class org.apache.hadoop.dfs.StreamFile --> |
| <class name="StreamFile" extends="org.apache.hadoop.dfs.DfsServlet" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="StreamFile" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doGet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="response" type="javax.servlet.http.HttpServletResponse"/> |
| <exception name="ServletException" type="javax.servlet.ServletException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.StreamFile --> |
| <!-- start interface org.apache.hadoop.dfs.Upgradeable --> |
| <interface name="Upgradeable" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.lang.Comparable<org.apache.hadoop.dfs.Upgradeable>"/> |
| <method name="getVersion" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the layout version of the upgrade object. |
| @return layout version]]> |
| </doc> |
| </method> |
| <method name="getType" return="org.apache.hadoop.dfs.FSConstants.NodeType" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the type of the software component, which this object is upgrading. |
| @return type]]> |
| </doc> |
| </method> |
| <method name="getDescription" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Description of the upgrade object for displaying. |
| @return description]]> |
| </doc> |
| </method> |
| <method name="getUpgradeStatus" return="short" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Upgrade status determines a percentage of the work done out of the total |
| amount required by the upgrade. |
| |
| 100% means that the upgrade is completed. |
| Any value < 100 means it is not complete. |
| |
| The return value should provide at least 2 values, e.g. 0 and 100. |
| @return integer value in the range [0, 100].]]> |
| </doc> |
| </method> |
| <method name="startUpgrade" return="org.apache.hadoop.dfs.UpgradeCommand" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Prepare for the upgrade. |
| E.g. initialize upgrade data structures and set status to 0. |
| |
| Returns an upgrade command that is used for broadcasting to other cluster |
| components. |
| E.g. name-node informs data-nodes that they must perform a distributed upgrade. |
| |
| @return an UpgradeCommand for broadcasting. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="completeUpgrade" return="org.apache.hadoop.dfs.UpgradeCommand" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Complete upgrade. |
| E.g. cleanup upgrade data structures or write metadata to disk. |
| |
| Returns an upgrade command that is used for broadcasting to other cluster |
| components. |
| E.g. data-nodes inform the name-node that they completed the upgrade |
| while other data-nodes are still upgrading. |
| |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getUpgradeStatusReport" return="org.apache.hadoop.dfs.UpgradeStatusReport" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="details" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get status report for the upgrade. |
| |
| @param details true if upgradeStatus details need to be included, |
| false otherwise |
| @return {@link UpgradeStatusReport} |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Common interface for distributed upgrade objects. |
| |
| Each upgrade object corresponds to a layout version, |
| which is the latest version that should be upgraded using this object. |
| That is all components whose layout version is greater or equal to the |
| one returned by {@link #getVersion()} must be upgraded with this object.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.dfs.Upgradeable --> |
| <!-- start class org.apache.hadoop.dfs.UpgradeStatusReport --> |
| <class name="UpgradeStatusReport" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="UpgradeStatusReport" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="UpgradeStatusReport" type="int, short, boolean" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getVersion" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the layout version of the currently running upgrade. |
| @return layout version]]> |
| </doc> |
| </method> |
| <method name="getUpgradeStatus" return="short" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get upgrade upgradeStatus as a percentage of the total upgrade done. |
| |
| @see Upgradeable#getUpgradeStatus()]]> |
| </doc> |
| </method> |
| <method name="isFinalized" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Is current upgrade finalized. |
| @return true if finalized or false otherwise.]]> |
| </doc> |
| </method> |
| <method name="getStatusText" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="details" type="boolean"/> |
| <doc> |
| <![CDATA[Get upgradeStatus data as a text for reporting. |
| Should be overridden to include upgrade-specific upgradeStatus data. |
| |
| @param details true if upgradeStatus details need to be included, |
| false otherwise |
| @return text]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Print basic upgradeStatus details.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="version" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="upgradeStatus" type="short" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="finalized" type="boolean" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Base upgrade upgradeStatus class. |
| Subclass this class if specific status fields need to be reported. |
| |
| Describes status of current upgrade.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.UpgradeStatusReport --> |
| <doc> |
| <![CDATA[<p>A distributed implementation of {@link |
| org.apache.hadoop.fs.FileSystem}. This is loosely modelled after |
| Google's <a href="http://labs.google.com/papers/gfs.html">GFS</a>.</p> |
| |
| <p>The most important difference is that unlike GFS, Hadoop DFS files |
| have strictly one writer at any one time. Bytes are always appended |
| to the end of the writer's stream. There is no notion of "record appends" |
| or "mutations" that are then checked or reordered. Writers simply emit |
| a byte stream. That byte stream is guaranteed to be stored in the |
| order written.</p>]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.dfs.datanode.metrics"> |
| <!-- start class org.apache.hadoop.dfs.datanode.metrics.DataNodeMetrics --> |
| <class name="DataNodeMetrics" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.metrics.Updater"/> |
| <constructor name="DataNodeMetrics" type="org.apache.hadoop.conf.Configuration, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="shutdown" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="doUpdates" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/> |
| <doc> |
| <![CDATA[Since this object is a registered updater, this method will be called |
| periodically, e.g. every 5 seconds.]]> |
| </doc> |
| </method> |
| <method name="resetAllMinMax" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <field name="bytesWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="bytesRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="blocksWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="blocksRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="blocksReplicated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="blocksRemoved" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="blocksVerified" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="blockVerificationFailures" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="readsFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="readsFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="writesFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="writesFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="readBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="writeBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="readMetadataOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="copyBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="replaceBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="heartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="blockReports" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[This class is for maintaining the various DataNode statistics |
| and publishing them through the metrics interfaces. |
| This also registers the JMX MBean for RPC. |
| <p> |
| This class has a number of metrics variables that are publicly accessible; |
| these variables (objects) have methods to update their values; |
| for example: |
| <p> {@link #blocksRead}.inc()]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.datanode.metrics.DataNodeMetrics --> |
| <!-- start class org.apache.hadoop.dfs.datanode.metrics.DataNodeStatistics --> |
| <class name="DataNodeStatistics" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean"/> |
| <method name="shutdown" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Shuts down the statistics |
| - unregisters the mbean]]> |
| </doc> |
| </method> |
| <method name="resetAllMinMax" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlocksRead" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlocksRemoved" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlocksReplicated" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlocksWritten" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBytesRead" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlockVerificationFailures" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlocksVerified" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReadsFromLocalClient" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReadsFromRemoteClient" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getWritesFromLocalClient" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getWritesFromRemoteClient" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReadBlockOpAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReadBlockOpMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReadBlockOpMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReadBlockOpNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReadMetadataOpAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReadMetadataOpMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReadMetadataOpMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReadMetadataOpNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReplaceBlockOpAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReplaceBlockOpMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReplaceBlockOpMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getReplaceBlockOpNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getWriteBlockOpAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getWriteBlockOpMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getWriteBlockOpMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getWriteBlockOpNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getCopyBlockOpAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getCopyBlockOpMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getCopyBlockOpMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getCopyBlockOpNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlockReportsAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlockReportsMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlockReportsMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlockReportsNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getHeartbeatsAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getHeartbeatsMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getHeartbeatsMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getHeartbeatsNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.datanode.metrics.DataNodeStatistics --> |
| <!-- start interface org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean --> |
| <interface name="DataNodeStatisticsMBean" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getBytesRead" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of bytes read in the last interval |
| @return number of bytes read]]> |
| </doc> |
| </method> |
| <method name="getBlocksWritten" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of blocks written in the last interval |
| @return number of blocks written]]> |
| </doc> |
| </method> |
| <method name="getBlocksRead" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of blocks read in the last interval |
| @return number of blocks read]]> |
| </doc> |
| </method> |
| <method name="getBlocksReplicated" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of blocks replicated in the last interval |
| @return number of blocks replicated]]> |
| </doc> |
| </method> |
| <method name="getBlocksRemoved" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of blocks removed in the last interval |
| @return number of blocks removed]]> |
| </doc> |
| </method> |
| <method name="getBlocksVerified" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of blocks verified in the last interval |
| @return number of blocks verified]]> |
| </doc> |
| </method> |
| <method name="getBlockVerificationFailures" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of block verification failures in the last interval |
| @return number of block verification failures]]> |
| </doc> |
| </method> |
| <method name="getReadsFromLocalClient" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of reads from local clients in the last interval |
| @return number of reads from local clients]]> |
| </doc> |
| </method> |
| <method name="getReadsFromRemoteClient" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of reads from remote clients in the last interval |
| @return number of reads from remote clients]]> |
| </doc> |
| </method> |
| <method name="getWritesFromLocalClient" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of writes from local clients in the last interval |
| @return number of writes from local clients]]> |
| </doc> |
| </method> |
| <method name="getWritesFromRemoteClient" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of writes from remote clients in the last interval |
| @return number of writes from remote clients]]> |
| </doc> |
| </method> |
| <method name="getReadBlockOpNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of ReadBlock Operation in last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getReadBlockOpAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Average time for ReadBlock Operation in last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getReadBlockOpMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Minimum ReadBlock Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getReadBlockOpMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Maximum ReadBlock Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getWriteBlockOpNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of WriteBlock Operation in last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getWriteBlockOpAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Average time for WriteBlock Operation in last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getWriteBlockOpMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Minimum WriteBlock Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getWriteBlockOpMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Maximum WriteBlock Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getReadMetadataOpNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of ReadMetadata Operation in last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getReadMetadataOpAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Average time for ReadMetadata Operation in last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getReadMetadataOpMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Minimum ReadMetadata Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getReadMetadataOpMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Maximum ReadMetadata Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getCopyBlockOpNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of CopyBlock Operation in last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getCopyBlockOpAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Average time for CopyBlock Operation in last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getCopyBlockOpMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Minimum CopyBlock Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getCopyBlockOpMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Maximum CopyBlock Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getReplaceBlockOpNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of ReplaceBlock Operation in last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getReplaceBlockOpAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Average time for ReplaceBlock Operation in last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getReplaceBlockOpMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Minimum ReplaceBlock Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getReplaceBlockOpMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Maximum ReplaceBlock Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getBlockReportsNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of Block Reports sent in last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getBlockReportsAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Average time for Block Reports Operation in last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getBlockReportsMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Minimum Block Reports Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getBlockReportsMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Maximum Block Reports Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getHeartbeatsNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of Heartbeat Operation in last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getHeartbeatsAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Average time for Heartbeat Operation in last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getHeartbeatsMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Minimum Heartbeat Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getHeartbeatsMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Maximum Heartbeat Operation Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="resetAllMinMax" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Reset all min max times]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This is the JMX interface for the runtime statistics for the data node. |
| Many of the statistics are sampled and averaged on an interval |
| which can be specified in the config file. |
| <p> |
| For the statistics that are sampled and averaged, one must specify |
| a metrics context that does periodic update calls. Most do. |
| The default Null metrics context however does NOT. So if you aren't |
| using any other metrics context then you can turn on the viewing and averaging |
| of sampled metrics by specifying the following two lines |
in the hadoop-metrics.properties file:
| <pre> |
| dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread |
| dfs.period=10 |
| </pre> |
| <p> |
| Note that the metrics are collected regardless of the context used. |
| The context with the update thread is used to average the data periodically. |
| <p> |
Data Node Status info is reported in another MBean
| @see org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean --> |
| <!-- start interface org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean --> |
| <interface name="FSDatasetMBean" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getDfsUsed" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the total space (in bytes) used by dfs datanode |
| @return the total space used by dfs datanode |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getCapacity" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns total capacity (in bytes) of storage (used and unused) |
| @return total capacity of storage (used and unused) |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getRemaining" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the amount of free storage space (in bytes) |
| @return The amount of free storage space |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getStorageInfo" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the storage id of the underlying storage]]> |
| </doc> |
| </method> |
| <doc> |
<![CDATA[This Interface defines the methods to get the status of the FSDataset of
| a data node. |
| It is also used for publishing via JMX (hence we follow the JMX naming |
| convention.) |
| <p> |
Data Node runtime statistic info is reported in another MBean
| @see org.apache.hadoop.dfs.datanode.metrics.DataNodeStatisticsMBean]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.dfs.datanode.metrics.FSDatasetMBean --> |
| </package> |
| <package name="org.apache.hadoop.dfs.namenode.metrics"> |
| <!-- start interface org.apache.hadoop.dfs.namenode.metrics.FSNamesystemMBean --> |
| <interface name="FSNamesystemMBean" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getFSState" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The state of the file system: Safemode or Operational |
| @return the state]]> |
| </doc> |
| </method> |
| <method name="getBlocksTotal" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of allocated blocks in the system |
| @return - number of allocated blocks]]> |
| </doc> |
| </method> |
| <method name="getCapacityTotal" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Total storage capacity |
| @return - total capacity in bytes]]> |
| </doc> |
| </method> |
| <method name="getCapacityRemaining" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Free (unused) storage capacity |
| @return - free capacity in bytes]]> |
| </doc> |
| </method> |
| <method name="getCapacityUsed" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Used storage capacity |
| @return - used capacity in bytes]]> |
| </doc> |
| </method> |
| <method name="getFilesTotal" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Total number of files and directories |
| @return - num of files and directories]]> |
| </doc> |
| </method> |
| <method name="numLiveDataNodes" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of Live data nodes |
| @return number of live data nodes]]> |
| </doc> |
| </method> |
| <method name="numDeadDataNodes" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of dead data nodes |
| @return number of dead data nodes]]> |
| </doc> |
| </method> |
| <doc> |
<![CDATA[This Interface defines the methods to get the status of the FSNamesystem of
| a name node. |
| It is also used for publishing via JMX (hence we follow the JMX naming |
| convention.) |
| |
| <p> |
Name Node runtime statistic info is reported in another MBean
| @see org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.dfs.namenode.metrics.FSNamesystemMBean --> |
| <!-- start class org.apache.hadoop.dfs.namenode.metrics.NameNodeStatistics --> |
| <class name="NameNodeStatistics" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean"/> |
| <constructor name="NameNodeStatistics" type="org.apache.hadoop.dfs.NameNodeMetrics" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[This constructs and registers the NameNodeStatisticsMBean |
| @param nameNodeMetrics - the metrics from which the mbean gets its info]]> |
| </doc> |
| </constructor> |
| <method name="shutdown" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Shuts down the statistics |
| - unregisters the mbean]]> |
| </doc> |
| </method> |
| <method name="getBlockReportAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlockReportMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlockReportMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getBlockReportNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getJournalTransactionAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getJournalTransactionNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getJournalTransactionMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getJournalTransactionMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getJournalSyncAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getJournalSyncMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getJournalSyncMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getJournalSyncNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getSafemodeTime" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getFSImageLoadTime" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="resetAllMinMax" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getNumFilesCreated" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getNumFilesListed" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getNumGetBlockLocations" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <method name="getNumFilesRenamed" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@inheritDoc]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This is the implementation of the Name Node JMX MBean]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.dfs.namenode.metrics.NameNodeStatistics --> |
| <!-- start interface org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean --> |
| <interface name="NameNodeStatisticsMBean" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getSafemodeTime" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The time spent in the Safemode at startup |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getFSImageLoadTime" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Time spent loading the FS Image at startup |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getJournalTransactionNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of Journal Transactions in the last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getJournalTransactionAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Average time for Journal transactions in last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getJournalTransactionMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Minimum Journal Transaction Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getJournalTransactionMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Maximum Journal Transaction Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getBlockReportNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of block Reports processed in the last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getBlockReportAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Average time for Block Report Processing in last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getBlockReportMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Minimum Block Report Processing Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getBlockReportMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Maximum Block Report Processing Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getJournalSyncNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of Journal Syncs in the last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getJournalSyncAverageTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Average time for Journal Sync in last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getJournalSyncMinTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Minimum Journal Sync Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getJournalSyncMaxTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Maximum Journal Sync Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="resetAllMinMax" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Reset all min max times]]> |
| </doc> |
| </method> |
| <method name="getNumFilesCreated" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of files created in the last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getNumGetBlockLocations" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of |
| {@link org.apache.hadoop.dfs.NameNode#getBlockLocations(String,long,long)} |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getNumFilesRenamed" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of files renamed in the last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getNumFilesListed" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of files listed in the last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This is the JMX management interface for getting runtime statistics of |
| the name node. |
| Many of the statistics are sampled and averaged on an interval |
| which can be specified in the config file. |
| <p> |
| For the statistics that are sampled and averaged, one must specify |
| a metrics context that does periodic update calls. Most do. |
| The default Null metrics context however does NOT. So if you aren't |
| using any other metrics context then you can turn on the viewing and averaging |
| of sampled metrics by specifying the following two lines |
in the hadoop-metrics.properties file:
| <pre> |
| dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread |
| dfs.period=10 |
| </pre> |
| <p> |
| Note that the metrics are collected regardless of the context used. |
| The context with the update thread is used to average the data periodically. |
| <p> |
Name Node Status info is reported in another MBean
| @see org.apache.hadoop.dfs.namenode.metrics.FSNamesystemMBean]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.dfs.namenode.metrics.NameNodeStatisticsMBean --> |
| </package> |
| <package name="org.apache.hadoop.filecache"> |
| <!-- start class org.apache.hadoop.filecache.DistributedCache --> |
| <class name="DistributedCache" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DistributedCache" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getLocalCache" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="cache" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="baseDir" type="org.apache.hadoop.fs.Path"/> |
| <param name="fileStatus" type="org.apache.hadoop.fs.FileStatus"/> |
| <param name="isArchive" type="boolean"/> |
| <param name="confFileStamp" type="long"/> |
| <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the locally cached file or archive; it could either be |
| previously cached (and valid) or copy it from the {@link FileSystem} now. |
| |
| @param cache the cache to be localized, this should be specified as |
new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
| or hostname:port is provided the file is assumed to be in the filesystem |
| being used in the Configuration |
@param conf The Configuration file which contains the filesystem
@param baseDir The base cache Dir where you want to localize the files/archives
| @param fileStatus The file status on the dfs. |
| @param isArchive if the cache is an archive or a file. In case it is an archive |
| with a .zip or .jar extension it will be unzipped/unjarred automatically |
| and the directory where the archive is unjarred is returned as the Path. |
| In case of a file, the path to the file is returned |
| @param confFileStamp this is the hdfs file modification timestamp to verify that the |
| file to be cached hasn't changed since the job started |
| @param currentWorkDir this is the directory where you would want to create symlinks |
| for the locally cached files/archives |
| @return the path to directory where the archives are unjarred in case of archives, |
| the path to the file where the file is copied locally |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getLocalCache" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="cache" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="baseDir" type="org.apache.hadoop.fs.Path"/> |
| <param name="isArchive" type="boolean"/> |
| <param name="confFileStamp" type="long"/> |
| <param name="currentWorkDir" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the locally cached file or archive; it could either be |
| previously cached (and valid) or copy it from the {@link FileSystem} now. |
| |
| @param cache the cache to be localized, this should be specified as |
new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no scheme
| or hostname:port is provided the file is assumed to be in the filesystem |
| being used in the Configuration |
@param conf The Configuration file which contains the filesystem
@param baseDir The base cache Dir where you want to localize the files/archives
| @param isArchive if the cache is an archive or a file. In case it is an archive |
| with a .zip or .jar extension it will be unzipped/unjarred automatically |
| and the directory where the archive is unjarred is returned as the Path. |
| In case of a file, the path to the file is returned |
| @param confFileStamp this is the hdfs file modification timestamp to verify that the |
| file to be cached hasn't changed since the job started |
| @param currentWorkDir this is the directory where you would want to create symlinks |
| for the locally cached files/archives |
| @return the path to directory where the archives are unjarred in case of archives, |
| the path to the file where the file is copied locally |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="releaseCache" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="cache" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
<![CDATA[This is the opposite of getLocalCache. When you are done with
| using the cache, you need to release the cache |
| @param cache The cache URI to be released |
| @param conf configuration which contains the filesystem the cache |
| is contained in. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="makeRelative" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="cache" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getTimestamp" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="cache" type="java.net.URI"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns mtime of a given cache file on hdfs. |
| @param conf configuration |
| @param cache cache file |
| @return mtime of a given cache file on hdfs |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="createAllSymlink" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="jobCacheDir" type="java.io.File"/> |
| <param name="workDir" type="java.io.File"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
<![CDATA[This method creates symlinks for all files in a given dir in another directory
| @param conf the configuration |
| @param jobCacheDir the target directory for creating symlinks |
| @param workDir the directory in which the symlinks are created |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="setCacheArchives" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="archives" type="java.net.URI[]"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Set the configuration with the given set of archives |
| @param archives The list of archives that need to be localized |
| @param conf Configuration which will be changed]]> |
| </doc> |
| </method> |
| <method name="setCacheFiles" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="files" type="java.net.URI[]"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Set the configuration with the given set of files |
| @param files The list of files that need to be localized |
| @param conf Configuration which will be changed]]> |
| </doc> |
| </method> |
| <method name="getCacheArchives" return="java.net.URI[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get cache archives set in the Configuration |
| @param conf The configuration which contains the archives |
| @return A URI array of the caches set in the Configuration |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getCacheFiles" return="java.net.URI[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get cache files set in the Configuration |
| @param conf The configuration which contains the files |
| @return A URI array of the files set in the Configuration |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getLocalCacheArchives" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the path array of the localized caches |
| @param conf Configuration that contains the localized archives |
| @return A path array of localized caches |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getLocalCacheFiles" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the path array of the localized files |
| @param conf Configuration that contains the localized files |
| @return A path array of localized files |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getArchiveTimestamps" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Get the timestamps of the archives |
| @param conf The configuration which stored the timestamps |
| @return a string array of timestamps |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getFileTimestamps" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Get the timestamps of the files |
| @param conf The configuration which stored the timestamps |
| @return a string array of timestamps |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="setArchiveTimestamps" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="timestamps" type="java.lang.String"/> |
| <doc> |
| <![CDATA[This is to check the timestamp of the archives to be localized |
@param conf Configuration which stores the timestamps
| @param timestamps comma separated list of timestamps of archives. |
| The order should be the same as the order in which the archives are added.]]> |
| </doc> |
| </method> |
| <method name="setFileTimestamps" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="timestamps" type="java.lang.String"/> |
| <doc> |
| <![CDATA[This is to check the timestamp of the files to be localized |
@param conf Configuration which stores the timestamps
| @param timestamps comma separated list of timestamps of files. |
| The order should be the same as the order in which the files are added.]]> |
| </doc> |
| </method> |
| <method name="setLocalArchives" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="str" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the conf to contain the location for localized archives |
| @param conf The conf to modify to contain the localized caches |
| @param str a comma separated list of local archives]]> |
| </doc> |
| </method> |
| <method name="setLocalFiles" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="str" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the conf to contain the location for localized files |
| @param conf The conf to modify to contain the localized caches |
| @param str a comma separated list of local files]]> |
| </doc> |
| </method> |
| <method name="addCacheArchive" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="uri" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
<![CDATA[Add an archive to be localized to the conf
| @param uri The uri of the cache to be localized |
| @param conf Configuration to add the cache to]]> |
| </doc> |
| </method> |
| <method name="addCacheFile" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="uri" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Add a file to be localized to the conf |
| @param uri The uri of the cache to be localized |
| @param conf Configuration to add the cache to]]> |
| </doc> |
| </method> |
| <method name="addFileToClassPath" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="file" type="org.apache.hadoop.fs.Path"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
<![CDATA[Add a file path to the current set of classpath entries. It adds the file
| to cache as well. |
| |
| @param file Path of the file to be added |
| @param conf Configuration that contains the classpath setting]]> |
| </doc> |
| </method> |
| <method name="getFileClassPaths" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Get the file entries in classpath as an array of Path |
| |
| @param conf Configuration that contains the classpath setting]]> |
| </doc> |
| </method> |
| <method name="addArchiveToClassPath" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="archive" type="org.apache.hadoop.fs.Path"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Add an archive path to the current set of classpath entries. It adds the |
| archive to cache as well. |
| |
| @param archive Path of the archive to be added |
| @param conf Configuration that contains the classpath setting]]> |
| </doc> |
| </method> |
| <method name="getArchiveClassPaths" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Get the archive entries in classpath as an array of Path |
| |
| @param conf Configuration that contains the classpath setting]]> |
| </doc> |
| </method> |
| <method name="createSymlink" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[This method allows you to create symlinks in the current working directory |
| of the task to all the cache files/archives |
| @param conf the jobconf]]> |
| </doc> |
| </method> |
| <method name="getSymlink" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
<![CDATA[This method checks to see if symlinks are to be created for the
| localized cache files in the current working directory |
| @param conf the jobconf |
@return true if symlinks are to be created; else return false]]>
| </doc> |
| </method> |
| <method name="checkURIs" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="uriFiles" type="java.net.URI[]"/> |
| <param name="uriArchives" type="java.net.URI[]"/> |
| <doc> |
| <![CDATA[This method checks if there is a conflict in the fragment names |
| of the uris. Also makes sure that each uri has a fragment. It |
| is only to be called if you want to create symlinks for |
| the various archives and files. |
| @param uriFiles The uri array of urifiles |
| @param uriArchives the uri array of uri archives]]> |
| </doc> |
| </method> |
| <method name="purgeCache" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Clear the entire contents of the cache and delete the backing files. This |
| should only be used when the server is reinitializing, because the users |
| are going to lose their files.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Distribute application-specific large, read-only files efficiently. |
| |
| <p><code>DistributedCache</code> is a facility provided by the Map-Reduce |
| framework to cache files (text, archives, jars etc.) needed by applications. |
| </p> |
| |
| <p>Applications specify the files, via urls (hdfs:// or http://) to be cached |
| via the {@link JobConf}. The <code>DistributedCache</code> assumes that the |
| files specified via hdfs:// urls are already present on the |
| {@link FileSystem} at the path specified by the url.</p> |
| |
| <p>The framework will copy the necessary files on to the slave node before |
| any tasks for the job are executed on that node. Its efficiency stems from |
| the fact that the files are only copied once per job and the ability to |
| cache archives which are un-archived on the slaves.</p> |
| |
| <p><code>DistributedCache</code> can be used to distribute simple, read-only |
| data/text files and/or more complex types such as archives, jars etc. |
| Archives (zip files) are un-archived at the slave nodes. Jars maybe be |
| optionally added to the classpath of the tasks, a rudimentary software |
| distribution mechanism. Files have execution permissions. Optionally users |
| can also direct it to symlink the distributed cache file(s) into |
| the working directory of the task.</p> |
| |
| <p><code>DistributedCache</code> tracks modification timestamps of the cache |
| files. Clearly the cache files should not be modified by the application |
| or externally while the job is executing.</p> |
| |
| <p>Here is an illustrative example on how to use the |
| <code>DistributedCache</code>:</p> |
| <p><blockquote><pre> |
| // Setting up the cache for the application |
| |
| 1. Copy the requisite files to the <code>FileSystem</code>: |
| |
| $ bin/hadoop fs -copyFromLocal lookup.dat /myapp/lookup.dat |
| $ bin/hadoop fs -copyFromLocal map.zip /myapp/map.zip |
| $ bin/hadoop fs -copyFromLocal mylib.jar /myapp/mylib.jar |
| |
| 2. Setup the application's <code>JobConf</code>: |
| |
| JobConf job = new JobConf(); |
| DistributedCache.addCacheFile(new URI("/myapp/lookup.dat#lookup.dat"), |
| job); |
DistributedCache.addCacheArchive(new URI("/myapp/map.zip"), job);
| DistributedCache.addFileToClassPath(new Path("/myapp/mylib.jar"), job); |
| |
| 3. Use the cached files in the {@link Mapper} or {@link Reducer}: |
| |
| public static class MapClass extends MapReduceBase |
| implements Mapper<K, V, K, V> { |
| |
| private Path[] localArchives; |
| private Path[] localFiles; |
| |
| public void configure(JobConf job) { |
| // Get the cached archives/files |
| localArchives = DistributedCache.getLocalCacheArchives(job); |
| localFiles = DistributedCache.getLocalCacheFiles(job); |
| } |
| |
| public void map(K key, V value, |
| OutputCollector<K, V> output, Reporter reporter) |
| throws IOException { |
| // Use data from the cached archives/files here |
| // ... |
| // ... |
| output.collect(k, v); |
| } |
| } |
| |
| </pre></blockquote></p> |
| |
| @see JobConf |
| @see JobClient]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.filecache.DistributedCache --> |
| </package> |
| <package name="org.apache.hadoop.fs"> |
| <!-- start class org.apache.hadoop.fs.BlockLocation --> |
| <class name="BlockLocation" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="BlockLocation" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Default Constructor]]> |
| </doc> |
| </constructor> |
| <constructor name="BlockLocation" type="java.lang.String[], java.lang.String[], long, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor with host, name, offset and length]]> |
| </doc> |
| </constructor> |
| <method name="getHosts" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the list of hosts (hostname) hosting this block]]> |
| </doc> |
| </method> |
| <method name="getNames" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the list of names (hostname:port) hosting this block]]> |
| </doc> |
| </method> |
| <method name="getOffset" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the start offset of file associated with this block]]> |
| </doc> |
| </method> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the length of the block]]> |
| </doc> |
| </method> |
| <method name="setOffset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="offset" type="long"/> |
| <doc> |
| <![CDATA[Set the start offset of file associated with this block]]> |
| </doc> |
| </method> |
| <method name="setLength" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="length" type="long"/> |
| <doc> |
| <![CDATA[Set the length of block]]> |
| </doc> |
| </method> |
| <method name="setHosts" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="hosts" type="java.lang.String[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Set the hosts hosting this block]]> |
| </doc> |
| </method> |
| <method name="setNames" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="names" type="java.lang.String[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Set the names (host:port) hosting this block]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Implement write of Writable]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Implement readFields of Writable]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.fs.BlockLocation --> |
| <!-- start class org.apache.hadoop.fs.BufferedFSInputStream --> |
| <class name="BufferedFSInputStream" extends="java.io.BufferedInputStream" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.fs.Seekable"/> |
| <implements name="org.apache.hadoop.fs.PositionedReadable"/> |
| <constructor name="BufferedFSInputStream" type="org.apache.hadoop.fs.FSInputStream, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a <code>BufferedFSInputStream</code> |
| with the specified buffer size, |
| and saves its argument, the input stream |
| <code>in</code>, for later use. An internal |
| buffer array of length <code>size</code> |
| is created and stored in <code>buf</code>. |
| |
| @param in the underlying input stream. |
| @param size the buffer size. |
| @exception IllegalArgumentException if size <= 0.]]> |
| </doc> |
| </constructor> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="skip" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="n" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="seek" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pos" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="seekToNewSource" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="targetPos" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <param name="buffer" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFully" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <param name="buffer" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFully" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <param name="buffer" type="byte[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
<![CDATA[A class that optimizes reading from FSInputStream by buffering]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.BufferedFSInputStream --> |
| <!-- start class org.apache.hadoop.fs.ChecksumException --> |
| <class name="ChecksumException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ChecksumException" type="java.lang.String, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[Thrown for checksum errors.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.ChecksumException --> |
| <!-- start class org.apache.hadoop.fs.ChecksumFileSystem --> |
| <class name="ChecksumFileSystem" extends="org.apache.hadoop.fs.FilterFileSystem" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ChecksumFileSystem" type="org.apache.hadoop.fs.FileSystem" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getApproxChkSumLength" return="double" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="size" type="long"/> |
| </method> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| </method> |
| <method name="getRawFileSystem" return="org.apache.hadoop.fs.FileSystem" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[get the raw file system]]> |
| </doc> |
| </method> |
| <method name="getChecksumFile" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="file" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Return the name of the checksum file associated with a file.]]> |
| </doc> |
| </method> |
| <method name="isChecksumFile" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="file" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Return true iff file is a checksum file name.]]> |
| </doc> |
| </method> |
| <method name="getChecksumFileLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="file" type="org.apache.hadoop.fs.Path"/> |
| <param name="fileSize" type="long"/> |
| <doc> |
| <![CDATA[Return the length of the checksum file given the size of the |
| actual file.]]> |
| </doc> |
| </method> |
| <method name="getBytesPerSum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the bytes Per Checksum]]> |
| </doc> |
| </method> |
| <method name="open" return="org.apache.hadoop.fs.FSDataInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="bufferSize" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataInputStream at the indicated Path. |
| @param f the file name to open |
| @param bufferSize the size of the buffer to be used.]]> |
| </doc> |
| </method> |
| <method name="getChecksumLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="size" type="long"/> |
| <param name="bytesPerSum" type="int"/> |
| <doc> |
| <![CDATA[Calculates the length of the checksum file in bytes. |
| @param size the length of the data file in bytes |
| @param bytesPerSum the number of bytes in a checksum block |
| @return the number of bytes in the checksum file]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress |
| reporting. |
| @param f the file name to open |
| @param overwrite if a file with this name already exists, then if true, |
| the file will be overwritten, and if false an error will be thrown. |
| @param bufferSize the size of the buffer to be used. |
| @param replication required block replication for the file.]]> |
| </doc> |
| </method> |
| <method name="setReplication" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="replication" type="short"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Set replication for an existing file. |
| Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt> |
| @param src file name |
| @param replication new replication |
| @throws IOException |
| @return true if successful; |
| false if file does not exist or is a directory]]> |
| </doc> |
| </method> |
| <method name="rename" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Rename files/dirs]]> |
| </doc> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="recursive" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Implement the delete(Path, boolean) in checksum |
| file system.]]> |
| </doc> |
| </method> |
| <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[List the statuses of the files/directories in the given path if the path is |
| a directory. |
| |
| @param f |
| given path |
| @return the statuses of the files/directories in the given path |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="mkdirs" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="copyFromLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delSrc" type="boolean"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="copyToLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delSrc" type="boolean"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The src file is under FS, and the dst is on the local disk. |
| Copy it from FS control to the local dst name.]]> |
| </doc> |
| </method> |
| <method name="copyToLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <param name="copyCrc" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The src file is under FS, and the dst is on the local disk. |
| Copy it from FS control to the local dst name. |
| If src and dst are directories, the copyCrc parameter |
| determines whether to copy CRC files.]]> |
| </doc> |
| </method> |
| <method name="startLocalOutput" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="completeLocalOutput" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="reportChecksumFailure" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/> |
| <param name="inPos" type="long"/> |
| <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/> |
| <param name="sumsPos" type="long"/> |
| <doc> |
| <![CDATA[Report a checksum error to the file system. |
| @param f the file name containing the error |
| @param in the stream open on the file |
| @param inPos the position of the beginning of the bad data in the file |
| @param sums the stream open on the checksum file |
| @param sumsPos the position of the beginning of the bad data in the checksum file |
| @return whether retry is necessary]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Abstract checksummed FileSystem. |
| It provides a basic implementation of a checksummed FileSystem, |
| which creates a checksum file for each raw file. |
| It generates & verifies checksums at the client side.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.ChecksumFileSystem --> |
| <!-- start class org.apache.hadoop.fs.ContentSummary --> |
| <class name="ContentSummary" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="ContentSummary" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor]]> |
| </doc> |
| </constructor> |
| <constructor name="ContentSummary" type="long, long, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor]]> |
| </doc> |
| </constructor> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the length]]> |
| </doc> |
| </method> |
| <method name="getDirectoryCount" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the directory count]]> |
| </doc> |
| </method> |
| <method name="getFileCount" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the file count]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Store the summary of a content (a directory or a file).]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.ContentSummary --> |
| <!-- start class org.apache.hadoop.fs.DF --> |
| <class name="DF" extends="org.apache.hadoop.util.Shell" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DF" type="java.io.File, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <constructor name="DF" type="java.io.File, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="getDirPath" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getFilesystem" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getCapacity" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getUsed" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getAvailable" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getPercentUsed" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getMount" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getExecString" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </method> |
| <method name="parseExecResult" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="lines" type="java.io.BufferedReader"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| </method> |
| <field name="DF_INTERVAL_DEFAULT" type="long" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Filesystem disk space usage statistics. Uses the unix 'df' program. |
| Tested on Linux, FreeBSD, Cygwin.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.DF --> |
| <!-- start class org.apache.hadoop.fs.DU --> |
| <class name="DU" extends="org.apache.hadoop.util.Shell" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DU" type="java.io.File, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <constructor name="DU" type="java.io.File, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="decDfsUsed" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="long"/> |
| </method> |
| <method name="incDfsUsed" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="long"/> |
| </method> |
| <method name="getUsed" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getDirPath" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getExecString" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </method> |
| <method name="parseExecResult" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="lines" type="java.io.BufferedReader"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| </method> |
| <doc> |
| <![CDATA[Filesystem disk space usage statistics. Uses the unix 'du' program]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.DU --> |
| <!-- start class org.apache.hadoop.fs.FileStatus --> |
| <class name="FileStatus" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <implements name="java.lang.Comparable"/> |
| <constructor name="FileStatus" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.Path" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="FileStatus" type="long, boolean, int, long, long, org.apache.hadoop.fs.permission.FsPermission, java.lang.String, java.lang.String, org.apache.hadoop.fs.Path" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getLen" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="isDir" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Is this a directory? |
| @return true if this is a directory]]> |
| </doc> |
| </method> |
| <method name="getBlockSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the block size of the file. |
| @return the number of bytes]]> |
| </doc> |
| </method> |
| <method name="getReplication" return="short" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the replication factor of a file. |
| @return the replication factor of a file.]]> |
| </doc> |
| </method> |
| <method name="getModificationTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the modification time of the file. |
| @return the modification time of file in milliseconds since January 1, 1970 UTC.]]> |
| </doc> |
| </method> |
| <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get FsPermission associated with the file. |
| @return permission. If a filesystem does not have a notion of permissions |
| or if permissions could not be determined, then default |
| permissions equivalent of "rwxrwxrwx" is returned.]]> |
| </doc> |
| </method> |
| <method name="getOwner" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the owner of the file. |
| @return owner of the file. The string could be empty if there is no |
| notion of owner of a file in a filesystem or if it could not |
| be determined (rare).]]> |
| </doc> |
| </method> |
| <method name="getGroup" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the group associated with the file. |
| @return group for the file. The string could be empty if there is no |
| notion of group of a file in a filesystem or if it could not |
| be determined (rare).]]> |
| </doc> |
| </method> |
| <method name="getPath" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setPermission" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <doc> |
| <![CDATA[Sets permission. |
| @param permission if permission is null, default value is set]]> |
| </doc> |
| </method> |
| <method name="setOwner" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="owner" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Sets owner. |
| @param owner if it is null, default value is set]]> |
| </doc> |
| </method> |
| <method name="setGroup" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="group" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Sets group. |
| @param group if it is null, default value is set]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Compare this object to another object |
| |
| @param o the object to be compared. |
| @return a negative integer, zero, or a positive integer as this object |
| is less than, equal to, or greater than the specified object. |
| |
| @throws ClassCastException if the specified object's is not of |
| type FileStatus]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Compare if this object is equal to another object |
| @param o the object to be compared. |
| @return true if two file status has the same path name; false if not.]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns a hash code value for the object, which is defined as |
| the hash code of the path name. |
| |
| @return a hash code value for the path name.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Class that represents the client side information for a file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FileStatus --> |
| <!-- start class org.apache.hadoop.fs.FileSystem --> |
| <class name="FileSystem" extends="org.apache.hadoop.conf.Configured" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.io.Closeable"/> |
| <constructor name="FileSystem" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="parseArgs" return="org.apache.hadoop.fs.FileSystem" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <param name="i" type="int"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Parse the cmd-line args, starting at i. Remove consumed args |
| from array. We expect param in the form: |
| '-local | -dfs <namenode:port>']]> |
| </doc> |
| </method> |
| <method name="get" return="org.apache.hadoop.fs.FileSystem" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the configured filesystem implementation.]]> |
| </doc> |
| </method> |
| <method name="getDefaultUri" return="java.net.URI" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Get the default filesystem URI from a configuration. |
| @param conf the configuration to access |
| @return the uri of the default filesystem]]> |
| </doc> |
| </method> |
| <method name="setDefaultUri" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="uri" type="java.net.URI"/> |
| <doc> |
| <![CDATA[Set the default filesystem URI in a configuration. |
| @param conf the configuration to alter |
| @param uri the new default filesystem uri]]> |
| </doc> |
| </method> |
| <method name="setDefaultUri" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="uri" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the default filesystem URI in a configuration. |
| @param conf the configuration to alter |
| @param uri the new default filesystem uri]]> |
| </doc> |
| </method> |
| <method name="initialize" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Called after a new FileSystem instance is constructed. |
| @param name a uri whose authority section names the host, port, etc. |
| for this FileSystem |
| @param conf the configuration]]> |
| </doc> |
| </method> |
| <method name="getUri" return="java.net.URI" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]> |
| </doc> |
| </method> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="call #getUri() instead."> |
| <doc> |
| <![CDATA[@deprecated call #getUri() instead.]]> |
| </doc> |
| </method> |
| <method name="getNamed" return="org.apache.hadoop.fs.FileSystem" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="call #get(URI,Configuration) instead."> |
| <param name="name" type="java.lang.String"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@deprecated call #get(URI,Configuration) instead.]]> |
| </doc> |
| </method> |
| <method name="getLocal" return="org.apache.hadoop.fs.LocalFileSystem" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
      <![CDATA[Get the local file system.
| @param conf the configuration to configure the file system with |
| @return a LocalFileSystem]]> |
| </doc> |
| </method> |
| <method name="get" return="org.apache.hadoop.fs.FileSystem" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="uri" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the FileSystem for this URI's scheme and authority. The scheme |
| of the URI determines a configuration property name, |
| <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class. |
| The entire URI is passed to the FileSystem instance's initialize method.]]> |
| </doc> |
| </method> |
| <method name="closeAll" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Close all cached filesystems. Be sure those filesystems are not |
| used anymore. |
| |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="makeQualified" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Make sure that a path specifies a FileSystem.]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="file" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[create a file with the provided permission |
| The permission of the file is set to be the provided permission as in |
| setPermission, not permission&~umask |
| |
| It is implemented using two RPCs. It is understood that it is inefficient, |
| but the implementation is thread-safe. The other option is to change the |
| value of umask in configuration to be 0, but it is not thread-safe. |
| |
| @param fs file system handle |
| @param file the name of the file to be created |
| @param permission the permission of the file |
| @return an output stream |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="mkdirs" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="dir" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[create a directory with the provided permission |
| The permission of the directory is set to be the provided permission as in |
| setPermission, not permission&~umask |
| |
| @see #create(FileSystem, Path, FsPermission) |
| |
| @param fs file system handle |
| @param dir the name of the directory to be created |
| @param permission the permission of the directory |
| @return true if the directory creation succeeds; false otherwise |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="checkPath" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Check that a Path belongs to this FileSystem.]]> |
| </doc> |
| </method> |
| <method name="getFileCacheHints" return="java.lang.String[][]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use getFileBlockLocations() instead |
| |
| Return a 2D array of size 1x1 or greater, containing hostnames |
| where portions of the given file can be found. For a nonexistent |
| file or regions, null will be returned. |
| |
| This call is most helpful with DFS, where it returns |
| hostnames of machines that contain the given file. |
| |
| The FileSystem will simply return an elt containing 'localhost'."> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="start" type="long"/> |
| <param name="len" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@deprecated Use getFileBlockLocations() instead |
| |
| Return a 2D array of size 1x1 or greater, containing hostnames |
| where portions of the given file can be found. For a nonexistent |
| file or regions, null will be returned. |
| |
| This call is most helpful with DFS, where it returns |
| hostnames of machines that contain the given file. |
| |
| The FileSystem will simply return an elt containing 'localhost'.]]> |
| </doc> |
| </method> |
| <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="start" type="long"/> |
| <param name="len" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return an array containing hostnames, offset and size of |
| portions of the given file. For a nonexistent |
| file or regions, null will be returned. |
| |
| This call is most helpful with DFS, where it returns |
| hostnames of machines that contain the given file. |
| |
| The FileSystem will simply return an elt containing 'localhost'.]]> |
| </doc> |
| </method> |
| <method name="open" return="org.apache.hadoop.fs.FSDataInputStream" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="bufferSize" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataInputStream at the indicated Path. |
| @param f the file name to open |
| @param bufferSize the size of the buffer to be used.]]> |
| </doc> |
| </method> |
| <method name="open" return="org.apache.hadoop.fs.FSDataInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataInputStream at the indicated Path. |
| @param f the file to open]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataOutputStream at the indicated Path. |
| Files are overwritten by default.]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="overwrite" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataOutputStream at the indicated Path.]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create an FSDataOutputStream at the indicated Path with write-progress |
| reporting. |
| Files are overwritten by default.]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="replication" type="short"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataOutputStream at the indicated Path. |
| Files are overwritten by default.]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="replication" type="short"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress |
| reporting. |
| Files are overwritten by default.]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataOutputStream at the indicated Path. |
| @param f the file name to open |
| @param overwrite if a file with this name already exists, then if true, |
| the file will be overwritten, and if false an error will be thrown. |
| @param bufferSize the size of the buffer to be used.]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress |
| reporting. |
| @param f the file name to open |
| @param overwrite if a file with this name already exists, then if true, |
| the file will be overwritten, and if false an error will be thrown. |
| @param bufferSize the size of the buffer to be used.]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataOutputStream at the indicated Path. |
| @param f the file name to open |
| @param overwrite if a file with this name already exists, then if true, |
| the file will be overwritten, and if false an error will be thrown. |
| @param bufferSize the size of the buffer to be used. |
| @param replication required block replication for the file.]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress |
| reporting. |
| @param f the file name to open |
| @param overwrite if a file with this name already exists, then if true, |
| the file will be overwritten, and if false an error will be thrown. |
| @param bufferSize the size of the buffer to be used. |
| @param replication required block replication for the file.]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataOutputStream at the indicated Path with write-progress |
| reporting. |
| @param f the file name to open |
| @param permission |
| @param overwrite if a file with this name already exists, then if true, |
| the file will be overwritten, and if false an error will be thrown. |
| @param bufferSize the size of the buffer to be used. |
| @param replication required block replication for the file. |
| @param blockSize |
| @param progress |
| @throws IOException |
| @see #setPermission(Path, FsPermission)]]> |
| </doc> |
| </method> |
| <method name="createNewFile" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Creates the given Path as a brand-new zero-length file. If |
| create fails, or if it already existed, return false.]]> |
| </doc> |
| </method> |
| <method name="getReplication" return="short" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use getFileStatus() instead"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get replication. |
| |
| @deprecated Use getFileStatus() instead |
| @param src file name |
| @return file replication |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="setReplication" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="replication" type="short"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Set replication for an existing file. |
| |
| @param src file name |
| @param replication new replication |
| @throws IOException |
| @return true if successful; |
| false if file does not exist or is a directory]]> |
| </doc> |
| </method> |
| <method name="rename" return="boolean" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Renames Path src to Path dst. Can take place on local fs |
| or remote DFS.]]> |
| </doc> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use delete(Path, boolean) instead"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@deprecated Use delete(Path, boolean) instead]]> |
| </doc> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="recursive" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Delete a file. |
| |
| @param f the path to delete. |
| @param recursive if path is a directory and set to |
| true, the directory is deleted else throws an exception. In |
| case of a file the recursive can be set to either true or false. |
| @return true if delete is successful else false. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="exists" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Check if exists. |
| @param f source file]]> |
| </doc> |
| </method> |
| <method name="isDirectory" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use getFileStatus() instead"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@deprecated Use getFileStatus() instead]]> |
| </doc> |
| </method> |
| <method name="isFile" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[True iff the named path is a regular file.]]> |
| </doc> |
| </method> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use getFileStatus() instead"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@deprecated Use getFileStatus() instead]]> |
| </doc> |
| </method> |
| <method name="getContentLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use {@link #getContentSummary(Path)}."> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the number of bytes of the given path |
| If <i>f</i> is a file, return the size of the file; |
| If <i>f</i> is a directory, return the size of the directory tree |
| @deprecated Use {@link #getContentSummary(Path)}.]]> |
| </doc> |
| </method> |
| <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the {@link ContentSummary} of a given {@link Path}.]]> |
| </doc> |
| </method> |
| <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[List the statuses of the files/directories in the given path if the path is |
| a directory. |
| |
| @param f |
| given path |
         @return the statuses of the files/directories in the given path
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="filter" type="org.apache.hadoop.fs.PathFilter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Filter files/directories in the given path using the user-supplied path |
| filter. |
| |
| @param f |
| a path name |
| @param filter |
| the user-supplied path filter |
| @return an array of FileStatus objects for the files under the given path |
| after applying the filter |
| @throws IOException |
| if encounter any problem while fetching the status]]> |
| </doc> |
| </method> |
| <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="files" type="org.apache.hadoop.fs.Path[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Filter files/directories in the given list of paths using default |
| path filter. |
| |
| @param files |
| a list of paths |
| @return a list of statuses for the files under the given paths after |
| applying the filter default Path filter |
| @exception IOException]]> |
| </doc> |
| </method> |
| <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="files" type="org.apache.hadoop.fs.Path[]"/> |
| <param name="filter" type="org.apache.hadoop.fs.PathFilter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Filter files/directories in the given list of paths using user-supplied |
| path filter. |
| |
| @param files |
| a list of paths |
| @param filter |
| the user-supplied path filter |
| @return a list of statuses for the files under the given paths after |
| applying the filter |
| @exception IOException]]> |
| </doc> |
| </method> |
| <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pathPattern" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[<p>Return all the files that match filePattern and are not checksum |
| files. Results are sorted by their names. |
| |
| <p> |
| A filename pattern is composed of <i>regular</i> characters and |
| <i>special pattern matching</i> characters, which are: |
| |
| <dl> |
| <dd> |
| <dl> |
| <p> |
| <dt> <tt> ? </tt> |
| <dd> Matches any single character. |
| |
| <p> |
| <dt> <tt> * </tt> |
| <dd> Matches zero or more characters. |
| |
| <p> |
| <dt> <tt> [<i>abc</i>] </tt> |
| <dd> Matches a single character from character set |
| <tt>{<i>a,b,c</i>}</tt>. |
| |
| <p> |
| <dt> <tt> [<i>a</i>-<i>b</i>] </tt> |
| <dd> Matches a single character from the character range |
| <tt>{<i>a...b</i>}</tt>. Note that character <tt><i>a</i></tt> must be |
| lexicographically less than or equal to character <tt><i>b</i></tt>. |
| |
| <p> |
| <dt> <tt> [^<i>a</i>] </tt> |
| <dd> Matches a single character that is not from character set or range |
| <tt>{<i>a</i>}</tt>. Note that the <tt>^</tt> character must occur |
| immediately to the right of the opening bracket. |
| |
| <p> |
| <dt> <tt> \<i>c</i> </tt> |
| <dd> Removes (escapes) any special meaning of character <i>c</i>. |
| |
| <p> |
| <dt> <tt> {ab,cd} </tt> |
| <dd> Matches a string from the string set <tt>{<i>ab, cd</i>} </tt> |
| |
| <p> |
| <dt> <tt> {ab,c{de,fh}} </tt> |
| <dd> Matches a string from the string set <tt>{<i>ab, cde, cfh</i>}</tt> |
| |
| </dl> |
| </dd> |
| </dl> |
| |
      @param pathPattern a regular expression specifying a path pattern
| |
| @return an array of paths that match the path pattern |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="globStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pathPattern" type="org.apache.hadoop.fs.Path"/> |
| <param name="filter" type="org.apache.hadoop.fs.PathFilter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return an array of FileStatus objects whose path names match pathPattern |
| and is accepted by the user-supplied path filter. Results are sorted by |
| their path names. |
| Return null if pathPattern has no glob and the path does not exist. |
| Return an empty array if pathPattern has a glob and no path matches it. |
| |
| @param pathPattern |
| a regular expression specifying the path pattern |
| @param filter |
| a user-supplied path filter |
| @return an array of FileStatus objects |
| @throws IOException if any I/O error occurs when fetching file status]]> |
| </doc> |
| </method> |
| <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the current user's home directory in this filesystem. |
| The default implementation returns "/user/$USER/".]]> |
| </doc> |
| </method> |
| <method name="setWorkingDirectory" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="new_dir" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Set the current working directory for the given file system. All relative |
| paths will be resolved relative to it. |
| |
| @param new_dir]]> |
| </doc> |
| </method> |
| <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the current working directory for the given file system |
| @return the directory pathname]]> |
| </doc> |
| </method> |
| <method name="mkdirs" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Call {@link #mkdirs(Path, FsPermission)} with default permission.]]> |
| </doc> |
| </method> |
| <method name="mkdirs" return="boolean" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Make the given file and all non-existent parents into |
| directories. Has the semantics of Unix 'mkdir -p'. |
| Existence of the directory hierarchy is not an error.]]> |
| </doc> |
| </method> |
| <method name="copyFromLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The src file is on the local disk. Add it to FS at |
| the given dst name and the source is kept intact afterwards]]> |
| </doc> |
| </method> |
| <method name="moveFromLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="srcs" type="org.apache.hadoop.fs.Path[]"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The src files is on the local disk. Add it to FS at |
| the given dst name, removing the source afterwards.]]> |
| </doc> |
| </method> |
| <method name="moveFromLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The src file is on the local disk. Add it to FS at |
| the given dst name, removing the source afterwards.]]> |
| </doc> |
| </method> |
| <method name="copyFromLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delSrc" type="boolean"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The src file is on the local disk. Add it to FS at |
| the given dst name. |
| delSrc indicates if the source should be removed]]> |
| </doc> |
| </method> |
| <method name="copyFromLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delSrc" type="boolean"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="srcs" type="org.apache.hadoop.fs.Path[]"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
      <![CDATA[The src files are on the local disk. Add them to FS at
| the given dst name. |
| delSrc indicates if the source should be removed]]> |
| </doc> |
| </method> |
| <method name="copyFromLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delSrc" type="boolean"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The src file is on the local disk. Add it to FS at |
| the given dst name. |
| delSrc indicates if the source should be removed]]> |
| </doc> |
| </method> |
| <method name="copyToLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The src file is under FS, and the dst is on the local disk. |
| Copy it from FS control to the local dst name.]]> |
| </doc> |
| </method> |
| <method name="moveToLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The src file is under FS, and the dst is on the local disk. |
| Copy it from FS control to the local dst name. |
| Remove the source afterwards]]> |
| </doc> |
| </method> |
| <method name="copyToLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delSrc" type="boolean"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The src file is under FS, and the dst is on the local disk. |
| Copy it from FS control to the local dst name. |
| delSrc indicates if the src will be removed or not.]]> |
| </doc> |
| </method> |
| <method name="startLocalOutput" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns a local File that the user can write output to. The caller |
| provides both the eventual FS target name and the local working |
| file. If the FS is local, we write directly into the target. If |
| the FS is remote, we write into the tmp local area.]]> |
| </doc> |
| </method> |
| <method name="completeLocalOutput" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Called when we're all done writing to the target. A local FS will |
| do nothing, because we've written to exactly the right place. A remote |
| FS will copy the contents of tmpLocalFile to the correct target at |
| fsOutputFile.]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[No more filesystem operations are needed. Will |
| release any held locks.]]> |
| </doc> |
| </method> |
| <method name="getUsed" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the total size of all files in the filesystem.]]> |
| </doc> |
| </method> |
| <method name="getBlockSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use getFileStatus() instead"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@deprecated Use getFileStatus() instead]]> |
| </doc> |
| </method> |
| <method name="getDefaultBlockSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the number of bytes that large input files should be optimally |
| be split into to minimize i/o time.]]> |
| </doc> |
| </method> |
| <method name="getDefaultReplication" return="short" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the default replication.]]> |
| </doc> |
| </method> |
| <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return a file status object that represents the path. |
| @param f The path we want information from |
| @return a FileStatus object |
| @throws FileNotFoundException when the path does not exist; |
| IOException see specific implementation]]> |
| </doc> |
| </method> |
| <method name="setPermission" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Set permission of a path. |
| @param p |
| @param permission]]> |
| </doc> |
| </method> |
| <method name="setOwner" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <param name="username" type="java.lang.String"/> |
| <param name="groupname" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Set owner of a path (i.e. a file or a directory). |
| The parameters username and groupname cannot both be null. |
| @param p The path |
| @param username If it is null, the original username remains unchanged. |
| @param groupname If it is null, the original groupname remains unchanged.]]> |
| </doc> |
| </method> |
| <method name="getStatistics" return="org.apache.hadoop.fs.FileSystem.Statistics" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="cls" type="java.lang.Class<? extends org.apache.hadoop.fs.FileSystem>"/> |
| <doc> |
| <![CDATA[Get the statistics for a particular file system |
| @param cls the class to lookup |
| @return a statistics object]]> |
| </doc> |
| </method> |
| <method name="printStatistics" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="statistics" type="org.apache.hadoop.fs.FileSystem.Statistics" |
| transient="false" volatile="false" |
| static="false" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The statistics for this file system.]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[An abstract base class for a fairly generic filesystem. It |
| may be implemented as a distributed filesystem, or as a "local" |
| one that reflects the locally-connected disk. The local version |
| exists for small Hadoop instances and for testing. |
| |
| <p> |
| |
| All user code that may potentially use the Hadoop Distributed |
| File System should be written to use a FileSystem object. The |
| Hadoop DFS is a multi-machine system that appears as a single |
| disk. It's useful because of its fault tolerance and potentially |
| very large capacity. |
| |
| <p> |
| The local implementation is {@link LocalFileSystem} and distributed |
| implementation is {@link DistributedFileSystem}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FileSystem --> |
| <!-- start class org.apache.hadoop.fs.FileSystem.Statistics --> |
| <class name="FileSystem.Statistics" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="FileSystem.Statistics" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="incrementBytesRead" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newBytes" type="long"/> |
| <doc> |
| <![CDATA[Increment the bytes read in the statistics |
| @param newBytes the additional bytes read]]> |
| </doc> |
| </method> |
| <method name="incrementBytesWritten" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newBytes" type="long"/> |
| <doc> |
| <![CDATA[Increment the bytes written in the statistics |
| @param newBytes the additional bytes written]]> |
| </doc> |
| </method> |
| <method name="getBytesRead" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the total number of bytes read |
| @return the number of bytes]]> |
| </doc> |
| </method> |
| <method name="getBytesWritten" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the total number of bytes written |
| @return the number of bytes]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FileSystem.Statistics --> |
| <!-- start class org.apache.hadoop.fs.FileUtil --> |
| <class name="FileUtil" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="FileUtil" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/> |
| <doc> |
| <![CDATA[convert an array of FileStatus to an array of Path |
| |
| @param stats |
| an array of FileStatus objects |
| @return an array of paths corresponding to the input]]> |
| </doc> |
| </method> |
| <method name="stat2Paths" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stats" type="org.apache.hadoop.fs.FileStatus[]"/> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[convert an array of FileStatus to an array of Path. |
 If stats is null, return path
| @param stats |
| an array of FileStatus objects |
| @param path |
 default path to return if stats is null
| @return an array of paths corresponding to the input]]> |
| </doc> |
| </method> |
| <method name="fullyDelete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dir" type="java.io.File"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Delete a directory and all its contents. If |
| we return false, the directory may be partially-deleted.]]> |
| </doc> |
| </method> |
| <method name="fullyDelete" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="dir" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Recursively delete a directory. |
| |
| @param fs {@link FileSystem} on which the path is present |
| @param dir directory to recursively delete |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="copy" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <param name="deleteSource" type="boolean"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Copy files between FileSystems.]]> |
| </doc> |
| </method> |
| <method name="copy" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="srcs" type="org.apache.hadoop.fs.Path[]"/> |
| <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <param name="deleteSource" type="boolean"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="copy" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <param name="deleteSource" type="boolean"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Copy files between FileSystems.]]> |
| </doc> |
| </method> |
| <method name="copyMerge" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="srcDir" type="org.apache.hadoop.fs.Path"/> |
| <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="dstFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="deleteSource" type="boolean"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="addString" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Copy all files in a directory to one output file (merge).]]> |
| </doc> |
| </method> |
| <method name="copy" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.io.File"/> |
| <param name="dstFS" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <param name="deleteSource" type="boolean"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Copy local files to a FileSystem.]]> |
| </doc> |
| </method> |
| <method name="copy" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="srcFS" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="java.io.File"/> |
| <param name="deleteSource" type="boolean"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Copy FileSystem files to local files.]]> |
| </doc> |
| </method> |
| <method name="makeShellPath" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="filename" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
      <![CDATA[Convert an os-native filename to a path that works for the shell.
| @param filename The filename to convert |
| @return The unix pathname |
| @throws IOException on windows, there can be problems with the subprocess]]> |
| </doc> |
| </method> |
| <method name="makeShellPath" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="file" type="java.io.File"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
      <![CDATA[Convert an os-native filename to a path that works for the shell.
| @param file The filename to convert |
| @return The unix pathname |
| @throws IOException on windows, there can be problems with the subprocess]]> |
| </doc> |
| </method> |
| <method name="getDU" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dir" type="java.io.File"/> |
| <doc> |
| <![CDATA[Takes an input dir and returns the du on that local directory. Very basic |
| implementation. |
| |
| @param dir |
| The input dir to get the disk space of this local dir |
| @return The total disk space of the input local directory]]> |
| </doc> |
| </method> |
| <method name="unZip" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="inFile" type="java.io.File"/> |
| <param name="unzipDir" type="java.io.File"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
      <![CDATA[Given a File input it will unzip the file in the unzip directory
| passed as the second parameter |
| @param inFile The zip file as input |
| @param unzipDir The unzip directory where to unzip the zip file. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="symLink" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="target" type="java.lang.String"/> |
| <param name="linkname" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a soft link between a src and destination |
| only on a local disk. HDFS does not support this |
| @param target the target for symlink |
| @param linkname the symlink |
| @return value returned by the command]]> |
| </doc> |
| </method> |
| <method name="chmod" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="filename" type="java.lang.String"/> |
| <param name="perm" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <exception name="InterruptedException" type="java.lang.InterruptedException"/> |
| <doc> |
| <![CDATA[Change the permissions on a filename. |
| @param filename the name of the file to change |
| @param perm the permission string |
| @return the exit code from the command |
| @throws IOException |
| @throws InterruptedException]]> |
| </doc> |
| </method> |
| <method name="createLocalTempFile" return="java.io.File" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="basefile" type="java.io.File"/> |
| <param name="prefix" type="java.lang.String"/> |
| <param name="isDeleteOnExit" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a tmp file for a base file. |
| @param basefile the base file of the tmp |
| @param prefix file name prefix of tmp |
| @param isDeleteOnExit if true, the tmp will be deleted when the VM exits |
| @return a newly created tmp file |
 @exception IOException If a tmp file cannot be created
| @see java.io.File#createTempFile(String, String, File) |
| @see java.io.File#deleteOnExit()]]> |
| </doc> |
| </method> |
| <method name="replaceFile" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.io.File"/> |
| <param name="target" type="java.io.File"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Move the src file to the name specified by target. |
| @param src the source file |
| @param target the target file |
| @exception IOException If this operation fails]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A collection of file-processing util methods]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FileUtil --> |
| <!-- start class org.apache.hadoop.fs.FileUtil.HardLink --> |
| <class name="FileUtil.HardLink" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="FileUtil.HardLink" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="createHardLink" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="target" type="java.io.File"/> |
| <param name="linkName" type="java.io.File"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Creates a hardlink]]> |
| </doc> |
| </method> |
| <method name="getLinkCount" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fileName" type="java.io.File"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Retrieves the number of links to the specified file.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Class for creating hardlinks. |
 Supports Unix, Cygwin, Windows XP.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FileUtil.HardLink --> |
| <!-- start class org.apache.hadoop.fs.FilterFileSystem --> |
| <class name="FilterFileSystem" extends="org.apache.hadoop.fs.FileSystem" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="FilterFileSystem" type="org.apache.hadoop.fs.FileSystem" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="initialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Called after a new FileSystem instance is constructed. |
| @param name a uri whose authority section names the host, port, etc. |
| for this FileSystem |
| @param conf the configuration]]> |
| </doc> |
| </method> |
| <method name="getUri" return="java.net.URI" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns a URI whose scheme and authority identify this FileSystem.]]> |
| </doc> |
| </method> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="call #getUri() instead."> |
| <doc> |
| <![CDATA[@deprecated call #getUri() instead.]]> |
| </doc> |
| </method> |
| <method name="makeQualified" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Make sure that a path specifies a FileSystem.]]> |
| </doc> |
| </method> |
| <method name="checkPath" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Check that a Path belongs to this FileSystem.]]> |
| </doc> |
| </method> |
| <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="start" type="long"/> |
| <param name="len" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return an array containing hostnames, offset and size of |
| portions of the given file. For a nonexistent |
| file or regions, null will be returned. |
| |
| This call is most helpful with DFS, where it returns |
| hostnames of machines that contain the given file. |
| |
| The FileSystem will simply return an elt containing 'localhost'.]]> |
| </doc> |
| </method> |
| <method name="open" return="org.apache.hadoop.fs.FSDataInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="bufferSize" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Opens an FSDataInputStream at the indicated Path. |
| @param f the file name to open |
| @param bufferSize the size of the buffer to be used.]]> |
| </doc> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="setReplication" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="replication" type="short"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Set replication for an existing file. |
| |
| @param src file name |
| @param replication new replication |
| @throws IOException |
| @return true if successful; |
| false if file does not exist or is a directory]]> |
| </doc> |
| </method> |
| <method name="rename" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Renames Path src to Path dst. Can take place on local fs |
| or remote DFS.]]> |
| </doc> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Delete a file]]> |
| </doc> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="recursive" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Delete a file]]> |
| </doc> |
| </method> |
| <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[List files in a directory.]]> |
| </doc> |
| </method> |
| <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setWorkingDirectory" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newDir" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Set the current working directory for the given file system. All relative |
| paths will be resolved relative to it. |
| |
| @param newDir]]> |
| </doc> |
| </method> |
| <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the current working directory for the given file system |
| |
| @return the directory pathname]]> |
| </doc> |
| </method> |
| <method name="mkdirs" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="copyFromLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delSrc" type="boolean"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The src file is on the local disk. Add it to FS at |
| the given dst name. |
| delSrc indicates if the source should be removed]]> |
| </doc> |
| </method> |
| <method name="copyToLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delSrc" type="boolean"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The src file is under FS, and the dst is on the local disk. |
| Copy it from FS control to the local dst name. |
| delSrc indicates if the src will be removed or not.]]> |
| </doc> |
| </method> |
| <method name="startLocalOutput" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns a local File that the user can write output to. The caller |
| provides both the eventual FS target name and the local working |
| file. If the FS is local, we write directly into the target. If |
| the FS is remote, we write into the tmp local area.]]> |
| </doc> |
| </method> |
| <method name="completeLocalOutput" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Called when we're all done writing to the target. A local FS will |
| do nothing, because we've written to exactly the right place. A remote |
| FS will copy the contents of tmpLocalFile to the correct target at |
| fsOutputFile.]]> |
| </doc> |
| </method> |
| <method name="getDefaultBlockSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the number of bytes that large input files should be optimally |
| be split into to minimize i/o time.]]> |
| </doc> |
| </method> |
| <method name="getDefaultReplication" return="short" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the default replication.]]> |
| </doc> |
| </method> |
| <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get file status.]]> |
| </doc> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="setOwner" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <param name="username" type="java.lang.String"/> |
| <param name="groupname" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="setPermission" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <field name="fs" type="org.apache.hadoop.fs.FileSystem" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[A <code>FilterFileSystem</code> contains |
| some other file system, which it uses as |
| its basic file system, possibly transforming |
| the data along the way or providing additional |
| functionality. The class <code>FilterFileSystem</code> |
| itself simply overrides all methods of |
| <code>FileSystem</code> with versions that |
| pass all requests to the contained file |
| system. Subclasses of <code>FilterFileSystem</code> |
| may further override some of these methods |
| and may also provide additional methods |
| and fields.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FilterFileSystem --> |
| <!-- start class org.apache.hadoop.fs.FSDataInputStream --> |
| <class name="FSDataInputStream" extends="java.io.DataInputStream" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.fs.Seekable"/> |
| <implements name="org.apache.hadoop.fs.PositionedReadable"/> |
| <constructor name="FSDataInputStream" type="java.io.InputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="seek" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="desired" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <param name="buffer" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFully" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <param name="buffer" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFully" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <param name="buffer" type="byte[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="seekToNewSource" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="targetPos" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[Utility that wraps a {@link FSInputStream} in a {@link DataInputStream} |
| and buffers input through a {@link BufferedInputStream}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FSDataInputStream --> |
| <!-- start class org.apache.hadoop.fs.FSDataOutputStream --> |
| <class name="FSDataOutputStream" extends="java.io.DataOutputStream" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="FSDataOutputStream" type="java.io.OutputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <constructor name="FSDataOutputStream" type="java.io.OutputStream, org.apache.hadoop.fs.FileSystem.Statistics" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getWrappedStream" return="java.io.OutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[Utility that wraps a {@link OutputStream} in a {@link DataOutputStream}, |
| buffers output through a {@link BufferedOutputStream} and creates a checksum |
| file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FSDataOutputStream --> |
| <!-- start class org.apache.hadoop.fs.FSError --> |
| <class name="FSError" extends="java.lang.Error" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Thrown for unexpected filesystem errors, presumed to reflect disk errors |
| in the native filesystem.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FSError --> |
| <!-- start class org.apache.hadoop.fs.FSInputChecker --> |
| <class name="FSInputChecker" extends="org.apache.hadoop.fs.FSInputStream" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor |
| |
| @param file The name of the file to be read |
| @param numOfRetries Number of read retries when ChecksumError occurs]]> |
| </doc> |
| </constructor> |
| <constructor name="FSInputChecker" type="org.apache.hadoop.fs.Path, int, boolean, java.util.zip.Checksum, int, int" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor |
| |
| @param file The name of the file to be read |
| @param numOfRetries Number of read retries when ChecksumError occurs |
| @param sum the type of Checksum engine |
@param chunkSize maximum chunk size
@param checksumSize the number of bytes in each checksum]]>
| </doc> |
| </constructor> |
| <method name="readChunk" return="int" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="pos" type="long"/> |
| <param name="buf" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="len" type="int"/> |
| <param name="checksum" type="byte[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reads in next checksum chunk data into <code>buf</code> at <code>offset</code> |
| and checksum into <code>checksum</code>. |
| The method is used for implementing read, therefore, it should be optimized |
| for sequential reading |
| @param pos chunkPos |
@param buf destination buffer
@param offset offset in buf at which to store data
@param len maximum number of bytes to read
| @return number of bytes read]]> |
| </doc> |
| </method> |
| <method name="getChunkPosition" return="long" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="pos" type="long"/> |
| <doc> |
| <![CDATA[Return position of beginning of chunk containing pos. |
| |
@param pos a position in the file
| @return the starting position of the chunk which contains the byte]]> |
| </doc> |
| </method> |
| <method name="needChecksum" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return true if there is a need for checksum verification]]> |
| </doc> |
| </method> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read one checksum-verified byte |
| |
| @return the next byte of data, or <code>-1</code> if the end of the |
| stream is reached. |
| @exception IOException if an I/O error occurs.]]> |
| </doc> |
| </method> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read checksum verified bytes from this byte-input stream into |
| the specified byte array, starting at the given offset. |
| |
| <p> This method implements the general contract of the corresponding |
| <code>{@link InputStream#read(byte[], int, int) read}</code> method of |
| the <code>{@link InputStream}</code> class. As an additional |
| convenience, it attempts to read as many bytes as possible by repeatedly |
| invoking the <code>read</code> method of the underlying stream. This |
| iterated <code>read</code> continues until one of the following |
| conditions becomes true: <ul> |
| |
| <li> The specified number of bytes have been read, |
| |
| <li> The <code>read</code> method of the underlying stream returns |
| <code>-1</code>, indicating end-of-file. |
| |
| </ul> If the first <code>read</code> on the underlying stream returns |
| <code>-1</code> to indicate end-of-file then this method returns |
| <code>-1</code>. Otherwise this method returns the number of bytes |
| actually read. |
| |
| @param b destination buffer. |
| @param off offset at which to start storing bytes. |
| @param len maximum number of bytes to read. |
| @return the number of bytes read, or <code>-1</code> if the end of |
| the stream has been reached. |
| @exception IOException if an I/O error occurs. |
| ChecksumException if any checksum error occurs]]> |
| </doc> |
| </method> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="available" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="skip" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="n" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Skips over and discards <code>n</code> bytes of data from the |
| input stream. |
| |
| <p>This method may skip more bytes than are remaining in the backing |
| file. This produces no exception and the number of bytes skipped |
| may include some number of bytes that were beyond the EOF of the |
| backing file. Attempting to read from the stream after skipping past |
| the end will result in -1 indicating the end of the file. |
| |
| <p>If <code>n</code> is negative, no bytes are skipped. |
| |
| @param n the number of bytes to be skipped. |
| @return the actual number of bytes skipped. |
| @exception IOException if an I/O error occurs. |
| ChecksumException if the chunk to skip to is corrupted]]> |
| </doc> |
| </method> |
| <method name="seek" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pos" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Seek to the given position in the stream. |
| The next read() will be from that position. |
| |
| <p>This method may seek past the end of the file. |
| This produces no exception and an attempt to read from |
| the stream will result in -1 indicating the end of the file. |
| |
@param pos the position to seek to.
| @exception IOException if an I/O error occurs. |
| ChecksumException if the chunk to seek to is corrupted]]> |
| </doc> |
| </method> |
| <method name="readFully" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="stm" type="java.io.InputStream"/> |
| <param name="buf" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[A utility function that tries to read up to <code>len</code> bytes from |
| <code>stm</code> |
| |
| @param stm an input stream |
@param buf destination buffer
| @param offset offset at which to store data |
| @param len number of bytes to read |
| @return actual number of bytes read |
| @throws IOException if there is any IO error]]> |
| </doc> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="sum" type="java.util.zip.Checksum"/> |
| <param name="maxChunkSize" type="int"/> |
| <param name="checksumSize" type="int"/> |
| <doc> |
| <![CDATA[Set the checksum related parameters |
| @param sum which type of checksum to use |
@param maxChunkSize maximum chunk size
| @param checksumSize checksum size]]> |
| </doc> |
| </method> |
| <method name="markSupported" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="mark" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="readlimit" type="int"/> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="file" type="org.apache.hadoop.fs.Path" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[The file name from which data is read]]>
| </doc> |
| </field> |
| <doc> |
| <![CDATA[This is a generic input stream for verifying checksums for |
| data before it is read by a user.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FSInputChecker --> |
| <!-- start class org.apache.hadoop.fs.FSInputStream --> |
| <class name="FSInputStream" extends="java.io.InputStream" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.fs.Seekable"/> |
| <implements name="org.apache.hadoop.fs.PositionedReadable"/> |
| <constructor name="FSInputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="seek" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pos" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Seek to the given offset from the start of the file. |
| The next read() will be from that location. Can't |
| seek past the end of the file.]]> |
| </doc> |
| </method> |
| <method name="getPos" return="long" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the current offset from the start of the file]]> |
| </doc> |
| </method> |
| <method name="seekToNewSource" return="boolean" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="targetPos" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Seeks a different copy of the data. Returns true if |
| found a new source, false otherwise.]]> |
| </doc> |
| </method> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <param name="buffer" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFully" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <param name="buffer" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFully" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <param name="buffer" type="byte[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[FSInputStream is a generic old InputStream with a little bit |
| of RAF-style seek ability.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FSInputStream --> |
| <!-- start class org.apache.hadoop.fs.FSOutputSummer --> |
| <class name="FSOutputSummer" extends="java.io.OutputStream" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="FSOutputSummer" type="java.util.zip.Checksum, int, int" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="writeChunk" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="len" type="int"/> |
| <param name="checksum" type="byte[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write one byte]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Writes <code>len</code> bytes from the specified byte array |
| starting at offset <code>off</code> and generate a checksum for |
| each data chunk. |
| |
| <p> This method stores bytes from the given array into this |
stream's buffer before it gets checksummed. The buffer gets checksummed
| and flushed to the underlying output stream when all data |
| in a checksum chunk are in the buffer. If the buffer is empty and |
| requested length is at least as large as the size of next checksum chunk |
| size, this method will checksum and write the chunk directly |
to the underlying output stream. Thus it avoids unnecessary data copy.
| |
| @param b the data. |
| @param off the start offset in the data. |
| @param len the number of bytes to write. |
| @exception IOException if an I/O error occurs.]]> |
| </doc> |
| </method> |
| <method name="flushBuffer" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="flushBuffer" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="keep" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[This is a generic output stream for generating checksums for |
| data before it is written to the underlying stream]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FSOutputSummer --> |
| <!-- start class org.apache.hadoop.fs.FsShell --> |
| <class name="FsShell" extends="org.apache.hadoop.conf.Configured" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.util.Tool"/> |
| <constructor name="FsShell" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="FsShell" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="init" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getCurrentTrashDir" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the Trash object associated with this shell.]]> |
| </doc> |
| </method> |
| <method name="byteDesc" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="len" type="long"/> |
| <doc> |
| <![CDATA[Return an abbreviated English-language desc of the byte length]]> |
| </doc> |
| </method> |
| <method name="limitDecimalTo2" return="java.lang.String" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="d" type="double"/> |
| </method> |
| <method name="run" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[run]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[main() has some simple utility methods]]> |
| </doc> |
| </method> |
| <field name="fs" type="org.apache.hadoop.fs.FileSystem" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="dateForm" type="java.text.SimpleDateFormat" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="modifFmt" type="java.text.SimpleDateFormat" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Provide command line access to a FileSystem.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.FsShell --> |
| <!-- start class org.apache.hadoop.fs.InMemoryFileSystem --> |
| <class name="InMemoryFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="InMemoryFileSystem" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="InMemoryFileSystem" type="java.net.URI, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="reserveSpaceWithCheckSum" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="size" type="long"/> |
| <doc> |
| <![CDATA[Register a file with its size. This will also register a checksum for the |
| file that the user is trying to create. This is required since none of |
| the FileSystem APIs accept the size of the file as argument. But since it |
| is required for us to apriori know the size of the file we are going to |
| create, the user must call this method for each file he wants to create |
| and reserve memory for that file. We either succeed in reserving memory |
| for both the main file and the checksum file and return true, or return |
| false.]]> |
| </doc> |
| </method> |
| <method name="getFiles" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="filter" type="org.apache.hadoop.fs.PathFilter"/> |
| </method> |
| <method name="getNumFiles" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="filter" type="org.apache.hadoop.fs.PathFilter"/> |
| </method> |
| <method name="getFSSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getPercentUsed" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[An implementation of the in-memory filesystem. This implementation assumes |
| that the file lengths are known ahead of time and the total lengths of all |
| the files is below a certain number (like 100 MB, configurable). Use the API |
| reserveSpaceWithCheckSum(Path f, int size) (see below for a description of |
| the API) for reserving space in the FS. The URI of this filesystem starts with |
| ramfs:// .]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.InMemoryFileSystem --> |
| <!-- start class org.apache.hadoop.fs.LocalDirAllocator --> |
| <class name="LocalDirAllocator" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="LocalDirAllocator" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create an allocator object |
| @param contextCfgItemName]]> |
| </doc> |
| </constructor> |
| <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pathStr" type="java.lang.String"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get a path from the local FS. This method should be used if the size of |
| the file is not known apriori. We go round-robin over the set of disks |
| (via the configured dirs) and return the first complete path where |
| we could create the parent directory of the passed path. |
| @param pathStr the requested path (this will be created on the first |
| available disk) |
| @param conf the Configuration object |
| @return the complete path to the file on a local disk |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getLocalPathForWrite" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pathStr" type="java.lang.String"/> |
| <param name="size" type="long"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get a path from the local FS. Pass size as -1 if not known apriori. We |
| round-robin over the set of disks (via the configured dirs) and return |
| the first complete path which has enough space |
| @param pathStr the requested path (this will be created on the first |
| available disk) |
| @param size the size of the file that is going to be written |
| @param conf the Configuration object |
| @return the complete path to the file on a local disk |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getLocalPathToRead" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pathStr" type="java.lang.String"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get a path from the local FS for reading. We search through all the |
| configured dirs for the file's existence and return the complete |
| path to the file when we find one |
| @param pathStr the requested file (this will be searched) |
| @param conf the Configuration object |
| @return the complete path to the file on a local disk |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="createTmpFileForWrite" return="java.io.File" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pathStr" type="java.lang.String"/> |
| <param name="size" type="long"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Creates a temporary file in the local FS. Pass size as -1 if not known |
| apriori. We round-robin over the set of disks (via the configured dirs) |
| and select the first complete path which has enough space. A file is |
| created on this directory. The file is guaranteed to go away when the |
| JVM exits. |
| @param pathStr prefix for the temporary file |
| @param size the size of the file that is going to be written |
| @param conf the Configuration object |
| @return a unique temporary file |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="isContextValid" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="contextCfgItemName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Method to check whether a context is valid |
| @param contextCfgItemName |
| @return true/false]]> |
| </doc> |
| </method> |
| <method name="ifExists" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pathStr" type="java.lang.String"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[We search through all the configured dirs for the file's existence |
| and return true when we find one |
| @param pathStr the requested file (this will be searched) |
| @param conf the Configuration object |
| @return true if files exist. false otherwise |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[An implementation of a round-robin scheme for disk allocation for creating |
| files. The way it works is that we keep track of which disk was last |
| allocated for a file write. For the current request, the next disk from |
| the set of disks would be allocated if the free space on the disk is |
| sufficient to accommodate the file that is being considered for |
| creation. If the space requirements cannot be met, the next disk in order |
| would be tried and so on till a disk is found with sufficient capacity. |
| Once a disk with sufficient space is identified, a check is done to make |
| sure that the disk is writable. Also, there is an API provided that doesn't |
| take the space requirements into consideration but just checks whether the |
| disk under consideration is writable (this should be used for cases where |
| the file size is not known apriori). An API is provided to read a path that |
| was created earlier. That API works by doing a scan of all the disks for the |
| input pathname. |
| This implementation also provides the functionality of having multiple |
| allocators per JVM (one for each unique functionality or context, like |
| mapred, dfs-client, etc.). It ensures that there is only one instance of |
| an allocator per context per JVM. |
| Note: |
| 1. The contexts referred above are actually the configuration items defined |
| in the Configuration class like "mapred.local.dir" (for which we want to |
| control the dir allocations). The context-strings are exactly those |
| configuration items. |
| 2. This implementation does not take into consideration cases where |
| a disk becomes read-only or goes out of space while a file is being written |
| to (disks are shared between multiple processes, and so the latter situation |
| is probable). |
| 3. In the class implementation, "Disk" is referred to as "Dir", which |
| actually points to the configured directory on the Disk which will be the |
| parent for all file write/read allocations.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.LocalDirAllocator --> |
| <!-- start class org.apache.hadoop.fs.LocalFileSystem --> |
| <class name="LocalFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="LocalFileSystem" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="LocalFileSystem" type="org.apache.hadoop.fs.FileSystem" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="pathToFile" return="java.io.File" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Convert a path to a File.]]> |
| </doc> |
| </method> |
| <method name="copyFromLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delSrc" type="boolean"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="copyToLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delSrc" type="boolean"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="reportChecksumFailure" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/> |
| <param name="inPos" type="long"/> |
| <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/> |
| <param name="sumsPos" type="long"/> |
| <doc> |
| <![CDATA[Moves files to a bad file directory on the same device, so that their |
| storage will not be reused.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Implement the FileSystem API for the checksummed local filesystem.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.LocalFileSystem --> |
| <!-- start class org.apache.hadoop.fs.Path --> |
| <class name="Path" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.lang.Comparable"/> |
| <constructor name="Path" type="java.lang.String, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Resolve a child path against a parent path.]]> |
| </doc> |
| </constructor> |
| <constructor name="Path" type="org.apache.hadoop.fs.Path, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Resolve a child path against a parent path.]]> |
| </doc> |
| </constructor> |
| <constructor name="Path" type="java.lang.String, org.apache.hadoop.fs.Path" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Resolve a child path against a parent path.]]> |
| </doc> |
| </constructor> |
| <constructor name="Path" type="org.apache.hadoop.fs.Path, org.apache.hadoop.fs.Path" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Resolve a child path against a parent path.]]> |
| </doc> |
| </constructor> |
| <constructor name="Path" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a path from a String. Path strings are URIs, but with |
| unescaped elements and some additional normalization.]]> |
| </doc> |
| </constructor> |
| <constructor name="Path" type="java.lang.String, java.lang.String, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a Path from components.]]> |
| </doc> |
| </constructor> |
| <method name="toUri" return="java.net.URI" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Convert this to a URI.]]> |
| </doc> |
| </method> |
| <method name="getFileSystem" return="org.apache.hadoop.fs.FileSystem" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the FileSystem that owns this Path.]]> |
| </doc> |
| </method> |
| <method name="isAbsolute" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[True if the directory of this path is absolute.]]> |
| </doc> |
| </method> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the final component of this path.]]> |
| </doc> |
| </method> |
| <method name="getParent" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the parent of a path or null if at root.]]> |
| </doc> |
| </method> |
| <method name="suffix" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="suffix" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Adds a suffix to the final name in the path.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| </method> |
| <method name="depth" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the number of elements in this path.]]> |
| </doc> |
| </method> |
| <method name="makeQualified" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <doc> |
| <![CDATA[Returns a qualified path object.]]> |
| </doc> |
| </method> |
| <field name="SEPARATOR" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The directory separator, a slash.]]> |
| </doc> |
| </field> |
| <field name="SEPARATOR_CHAR" type="char" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="CUR_DIR" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Names a file or directory in a {@link FileSystem}. |
| Path strings use slash as the directory separator. A path string is |
| absolute if it begins with a slash.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.Path --> |
| <!-- start interface org.apache.hadoop.fs.PathFilter --> |
| <interface name="PathFilter" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="accept" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Tests whether or not the specified abstract pathname should be |
| included in a pathname list. |
| |
| @param path The abstract pathname to be tested |
| @return <code>true</code> if and only if <code>pathname</code> |
| should be included]]> |
| </doc> |
| </method> |
| </interface> |
| <!-- end interface org.apache.hadoop.fs.PathFilter --> |
| <!-- start interface org.apache.hadoop.fs.PositionedReadable --> |
| <interface name="PositionedReadable" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <param name="buffer" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read up to the specified number of bytes, from a given |
| position within a file, and return the number of bytes read. This does not |
| change the current offset of a file, and is thread-safe.]]> |
| </doc> |
| </method> |
| <method name="readFully" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <param name="buffer" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read the specified number of bytes, from a given |
| position within a file. This does not |
| change the current offset of a file, and is thread-safe.]]> |
| </doc> |
| </method> |
| <method name="readFully" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <param name="buffer" type="byte[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a number of bytes equal to the length of the buffer, from a given |
| position within a file. This does not |
| change the current offset of a file, and is thread-safe.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Stream that permits positional reading.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.fs.PositionedReadable --> |
| <!-- start class org.apache.hadoop.fs.RawLocalFileSystem --> |
| <class name="RawLocalFileSystem" extends="org.apache.hadoop.fs.FileSystem" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="RawLocalFileSystem" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="pathToFile" return="java.io.File" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Convert a path to a File.]]> |
| </doc> |
| </method> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="deprecated, no comment"> |
| <doc> |
| <![CDATA[@deprecated]]> |
| </doc> |
| </method> |
| <method name="getUri" return="java.net.URI" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="initialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="uri" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| </method> |
| <method name="open" return="org.apache.hadoop.fs.FSDataInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="bufferSize" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="rename" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <param name="recursive" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="mkdirs" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Creates the specified directory hierarchy. Does not |
| treat existence as an error.]]> |
| </doc> |
| </method> |
| <method name="mkdirs" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setWorkingDirectory" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newDir" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Set the working directory to the given directory.]]> |
| </doc> |
| </method> |
| <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="lock" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="deprecated, no comment"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <param name="shared" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@deprecated]]> |
| </doc> |
| </method> |
| <method name="release" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="deprecated, no comment"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@deprecated]]> |
| </doc> |
| </method> |
| <method name="moveFromLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startLocalOutput" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="completeLocalOutput" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fsWorkingFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="setOwner" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <param name="username" type="java.lang.String"/> |
| <param name="groupname" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Use the command chown to set owner.]]> |
| </doc> |
| </method> |
| <method name="setPermission" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="p" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Use the command chmod to set permission.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Implement the FileSystem API for the raw local filesystem.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.RawLocalFileSystem --> |
| <!-- start interface org.apache.hadoop.fs.Seekable --> |
| <interface name="Seekable" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="seek" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pos" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Seek to the given offset from the start of the file. |
| The next read() will be from that location. Can't |
| seek past the end of the file.]]> |
| </doc> |
| </method> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the current offset from the start of the file]]> |
| </doc> |
| </method> |
| <method name="seekToNewSource" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="targetPos" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Seeks a different copy of the data. Returns true if |
| found a new source, false otherwise.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Stream that permits seeking.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.fs.Seekable --> |
| <!-- start class org.apache.hadoop.fs.ShellCommand --> |
| <class name="ShellCommand" extends="org.apache.hadoop.util.Shell" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="Use {@link Shell} instead."> |
| <constructor name="ShellCommand" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[A base class for running a unix command like du or df. |
| @deprecated Use {@link Shell} instead.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.ShellCommand --> |
| <!-- start class org.apache.hadoop.fs.Trash --> |
| <class name="Trash" extends="org.apache.hadoop.conf.Configured" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Trash" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a trash can accessor. |
| @param conf a Configuration]]> |
| </doc> |
| </constructor> |
| <method name="moveToTrash" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Move a file or directory to the current trash directory. |
| @return false if the item is already in the trash or trash is disabled]]> |
| </doc> |
| </method> |
| <method name="checkpoint" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a trash checkpoint.]]> |
| </doc> |
| </method> |
| <method name="expunge" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Delete old checkpoints.]]> |
| </doc> |
| </method> |
| <method name="getEmptier" return="java.lang.Runnable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return a {@link Runnable} that periodically empties the trash of all |
| users, intended to be run by the superuser. Only one checkpoint is kept |
| at a time.]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[Run an emptier.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Provides a <i>trash</i> feature. Files are moved to a user's trash |
| directory, a subdirectory of their home directory named ".Trash". Files are |
| initially moved to a <i>current</i> sub-directory of the trash directory. |
| Within that sub-directory their original path is preserved. Periodically |
| one may checkpoint the current trash and remove older checkpoints. (This |
| design permits trash management without enumeration of the full trash |
| content, without date support in the filesystem, and without clock |
| synchronization.)]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.Trash --> |
| <doc> |
| <![CDATA[An abstract file system API.]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.fs.kfs"> |
| <!-- start class org.apache.hadoop.fs.kfs.KosmosFileSystem --> |
| <class name="KosmosFileSystem" extends="org.apache.hadoop.fs.FileSystem" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="KosmosFileSystem" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getUri" return="java.net.URI" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="initialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="uri" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setWorkingDirectory" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dir" type="org.apache.hadoop.fs.Path"/> |
| </method> |
| <method name="mkdirs" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="isDirectory" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="isFile" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getContentLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="file" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="open" return="org.apache.hadoop.fs.FSDataInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <param name="bufferSize" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="rename" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <param name="recursive" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getReplication" return="short" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getDefaultReplication" return="short" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setReplication" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <param name="replication" type="short"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getDefaultBlockSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="lock" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <param name="shared" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="release" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <param name="start" type="long"/> |
| <param name="len" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return null if the file doesn't exist; otherwise, get the |
 locations of the various chunks of the file from KFS.]]>
| </doc> |
| </method> |
| <method name="copyFromLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delSrc" type="boolean"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="copyToLocalFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="delSrc" type="boolean"/> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startLocalOutput" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="completeLocalOutput" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fsOutputFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="tmpLocalFile" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A FileSystem backed by KFS.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.kfs.KosmosFileSystem --> |
| <doc> |
| <![CDATA[<h1>A client for the Kosmos filesystem (KFS)</h1> |
| |
| <h3>Introduction</h3> |
| |
This page describes how to use the Kosmos Filesystem
| (<a href="http://kosmosfs.sourceforge.net"> KFS </a>) as a backing |
| store with Hadoop. This page assumes that you have downloaded the |
| KFS software and installed necessary binaries as outlined in the KFS |
| documentation. |
| |
| <h3>Steps</h3> |
| |
| <ul> |
| <li>In the Hadoop conf directory edit hadoop-default.xml, |
| add the following: |
| <pre> |
| <property> |
| <name>fs.kfs.impl</name> |
| <value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value> |
| <description>The FileSystem for kfs: uris.</description> |
| </property> |
| </pre> |
| |
| <li>In the Hadoop conf directory edit hadoop-site.xml, |
| adding the following (with appropriate values for |
| <server> and <port>): |
| <pre> |
| <property> |
| <name>fs.default.name</name> |
| <value>kfs://<server:port></value> |
| </property> |
| |
| <property> |
| <name>fs.kfs.metaServerHost</name> |
| <value><server></value> |
| <description>The location of the KFS meta server.</description> |
| </property> |
| |
| <property> |
| <name>fs.kfs.metaServerPort</name> |
| <value><port></value> |
| <description>The location of the meta server's port.</description> |
| </property> |
| |
| </pre> |
| </li> |
| |
| <li>Copy KFS's <i> kfs-0.1.jar </i> to Hadoop's lib directory. This step |
enables Hadoop to load the KFS-specific modules. Note
| that, kfs-0.1.jar was built when you compiled KFS source |
| code. This jar file contains code that calls KFS's client |
| library code via JNI; the native code is in KFS's <i> |
| libkfsClient.so </i> library. |
| </li> |
| |
| <li> When the Hadoop map/reduce trackers start up, those |
| processes (on local as well as remote nodes) will now need to load |
| KFS's <i> libkfsClient.so </i> library. To simplify this process, it is advisable to |
| store libkfsClient.so in an NFS accessible directory (similar to where |
| Hadoop binaries/scripts are stored); then, modify Hadoop's |
| conf/hadoop-env.sh adding the following line and providing suitable |
| value for <path>: |
| <pre> |
| export LD_LIBRARY_PATH=<path> |
| </pre> |
| |
| |
| <li>Start only the map/reduce trackers |
| <br /> |
| example: execute Hadoop's bin/start-mapred.sh</li> |
| </ul> |
| <br/> |
| |
| If the map/reduce job trackers start up, all file-I/O is done to KFS.]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.fs.permission"> |
| <!-- start class org.apache.hadoop.fs.permission.AccessControlException --> |
| <class name="AccessControlException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="AccessControlException" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Default constructor is needed for unwrapping from |
| {@link org.apache.hadoop.ipc.RemoteException}.]]> |
| </doc> |
| </constructor> |
| <constructor name="AccessControlException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructs an {@link AccessControlException} |
| with the specified detail message. |
| @param s the detail message.]]> |
| </doc> |
| </constructor> |
| <doc> |
| <![CDATA[An exception class for access control related issues.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.permission.AccessControlException --> |
| <!-- start class org.apache.hadoop.fs.permission.FsAction --> |
| <class name="FsAction" extends="java.lang.Enum<org.apache.hadoop.fs.permission.FsAction>" |
| abstract="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.fs.permission.FsAction[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.fs.permission.FsAction" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <method name="implies" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/> |
| <doc> |
| <![CDATA[Return true if this action implies that action. |
 @param that the action to check against]]>
| </doc> |
| </method> |
| <method name="and" return="org.apache.hadoop.fs.permission.FsAction" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/> |
| <doc> |
| <![CDATA[AND operation.]]> |
| </doc> |
| </method> |
| <method name="or" return="org.apache.hadoop.fs.permission.FsAction" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="that" type="org.apache.hadoop.fs.permission.FsAction"/> |
| <doc> |
| <![CDATA[OR operation.]]> |
| </doc> |
| </method> |
| <method name="not" return="org.apache.hadoop.fs.permission.FsAction" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[NOT operation.]]> |
| </doc> |
| </method> |
| <field name="INDEX" type="int" |
| transient="false" volatile="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Octal representation]]> |
| </doc> |
| </field> |
| <field name="SYMBOL" type="java.lang.String" |
| transient="false" volatile="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Symbolic representation]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[File system actions, e.g. read, write, etc.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.permission.FsAction --> |
| <!-- start class org.apache.hadoop.fs.permission.FsPermission --> |
| <class name="FsPermission" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction, org.apache.hadoop.fs.permission.FsAction" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct by the given {@link FsAction}. |
| @param u user action |
| @param g group action |
| @param o other action]]> |
| </doc> |
| </constructor> |
| <constructor name="FsPermission" type="short" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct by the given mode. |
| @param mode |
| @see #toShort()]]> |
| </doc> |
| </constructor> |
| <constructor name="FsPermission" type="org.apache.hadoop.fs.permission.FsPermission" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Copy constructor |
| |
| @param other other permission]]> |
| </doc> |
| </constructor> |
| <method name="createImmutable" return="org.apache.hadoop.fs.permission.FsPermission" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="permission" type="short"/> |
| <doc> |
| <![CDATA[Create an immutable {@link FsPermission} object.]]> |
| </doc> |
| </method> |
| <method name="getUserAction" return="org.apache.hadoop.fs.permission.FsAction" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return user {@link FsAction}.]]> |
| </doc> |
| </method> |
| <method name="getGroupAction" return="org.apache.hadoop.fs.permission.FsAction" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return group {@link FsAction}.]]> |
| </doc> |
| </method> |
| <method name="getOtherAction" return="org.apache.hadoop.fs.permission.FsAction" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return other {@link FsAction}.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="read" return="org.apache.hadoop.fs.permission.FsPermission" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create and initialize a {@link FsPermission} from {@link DataInput}.]]> |
| </doc> |
| </method> |
| <method name="toShort" return="short" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Encode the object to a short.]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="obj" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="applyUMask" return="org.apache.hadoop.fs.permission.FsPermission" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <doc> |
| <![CDATA[Apply a umask to this permission and return a new one]]> |
| </doc> |
| </method> |
| <method name="getUMask" return="org.apache.hadoop.fs.permission.FsPermission" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Get the user file creation mask (umask)]]> |
| </doc> |
| </method> |
| <method name="setUMask" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <doc> |
| <![CDATA[Set the user file creation mask (umask)]]> |
| </doc> |
| </method> |
| <method name="getDefault" return="org.apache.hadoop.fs.permission.FsPermission" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the default permission.]]> |
| </doc> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.fs.permission.FsPermission" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="unixSymbolicPermission" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Create a FsPermission from a Unix symbolic permission string |
| @param unixSymbolicPermission e.g. "-rw-rw-rw-"]]> |
| </doc> |
| </method> |
| <field name="UMASK_LABEL" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[umask property label]]> |
| </doc> |
| </field> |
| <field name="DEFAULT_UMASK" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[A class for file/directory permissions.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.permission.FsPermission --> |
| <!-- start class org.apache.hadoop.fs.permission.PermissionStatus --> |
| <class name="PermissionStatus" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="PermissionStatus" type="java.lang.String, java.lang.String, org.apache.hadoop.fs.permission.FsPermission" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor]]> |
| </doc> |
| </constructor> |
| <method name="createImmutable" return="org.apache.hadoop.fs.permission.PermissionStatus" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="user" type="java.lang.String"/> |
| <param name="group" type="java.lang.String"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <doc> |
| <![CDATA[Create an immutable {@link PermissionStatus} object.]]> |
| </doc> |
| </method> |
| <method name="getUserName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return user name]]> |
| </doc> |
| </method> |
| <method name="getGroupName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return group name]]> |
| </doc> |
| </method> |
| <method name="getPermission" return="org.apache.hadoop.fs.permission.FsPermission" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return permission]]> |
| </doc> |
| </method> |
| <method name="applyUMask" return="org.apache.hadoop.fs.permission.PermissionStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="umask" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <doc> |
| <![CDATA[Apply umask. |
| @see FsPermission#applyUMask(FsPermission)]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="read" return="org.apache.hadoop.fs.permission.PermissionStatus" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create and initialize a {@link PermissionStatus} from {@link DataInput}.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Store permission related information.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.permission.PermissionStatus --> |
| </package> |
| <package name="org.apache.hadoop.fs.s3"> |
| <!-- start class org.apache.hadoop.fs.s3.Block --> |
| <class name="Block" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Block" type="long, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getId" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[Holds metadata about a block of data being stored in a {@link FileSystemStore}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.s3.Block --> |
| <!-- start interface org.apache.hadoop.fs.s3.FileSystemStore --> |
| <interface name="FileSystemStore" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="initialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="uri" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getVersion" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="storeINode" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <param name="inode" type="org.apache.hadoop.fs.s3.INode"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="storeBlock" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="block" type="org.apache.hadoop.fs.s3.Block"/> |
| <param name="file" type="java.io.File"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="inodeExists" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="blockExists" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="blockId" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="retrieveINode" return="org.apache.hadoop.fs.s3.INode" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="retrieveBlock" return="java.io.File" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="block" type="org.apache.hadoop.fs.s3.Block"/> |
| <param name="byteRangeStart" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="deleteINode" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="deleteBlock" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="block" type="org.apache.hadoop.fs.s3.Block"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="listSubPaths" return="java.util.Set<org.apache.hadoop.fs.Path>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="listDeepSubPaths" return="java.util.Set<org.apache.hadoop.fs.Path>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="purge" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Delete everything. Used for testing. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="dump" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Diagnostic method to dump all INodes to the console. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A facility for storing and retrieving {@link INode}s and {@link Block}s.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.fs.s3.FileSystemStore --> |
| <!-- start class org.apache.hadoop.fs.s3.INode --> |
| <class name="INode" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="INode" type="org.apache.hadoop.fs.s3.INode.FileType, org.apache.hadoop.fs.s3.Block[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getBlocks" return="org.apache.hadoop.fs.s3.Block[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getFileType" return="org.apache.hadoop.fs.s3.INode.FileType" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="isDirectory" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="isFile" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getSerializedLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="serialize" return="java.io.InputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="deserialize" return="org.apache.hadoop.fs.s3.INode" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="FILE_TYPES" type="org.apache.hadoop.fs.s3.INode.FileType[]" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DIRECTORY_INODE" type="org.apache.hadoop.fs.s3.INode" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Holds file metadata including type (regular file, or directory), |
| and the list of blocks that are pointers to the data.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.s3.INode --> |
| <!-- start class org.apache.hadoop.fs.s3.MigrationTool --> |
| <class name="MigrationTool" extends="org.apache.hadoop.conf.Configured" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.util.Tool"/> |
| <constructor name="MigrationTool" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| </method> |
| <method name="run" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| </method> |
| <method name="initialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="uri" type="java.net.URI"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[<p> |
| This class is a tool for migrating data from an older to a newer version |
| of an S3 filesystem. |
| </p> |
| <p> |
| All files in the filesystem are migrated by re-writing the block metadata |
| - no datafiles are touched. |
| </p>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.s3.MigrationTool --> |
| <!-- start class org.apache.hadoop.fs.s3.S3Exception --> |
| <class name="S3Exception" extends="java.lang.RuntimeException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="S3Exception" type="java.lang.Throwable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[Thrown if there is a problem communicating with Amazon S3.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.s3.S3Exception --> |
| <!-- start class org.apache.hadoop.fs.s3.S3FileSystem --> |
| <class name="S3FileSystem" extends="org.apache.hadoop.fs.FileSystem" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="S3FileSystem" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="S3FileSystem" type="org.apache.hadoop.fs.s3.FileSystemStore" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getUri" return="java.net.URI" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="initialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="uri" type="java.net.URI"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setWorkingDirectory" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dir" type="org.apache.hadoop.fs.Path"/> |
| </method> |
| <method name="mkdirs" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@param permission Currently ignored.]]> |
| </doc> |
| </method> |
| <method name="isFile" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="file" type="org.apache.hadoop.fs.Path"/> |
| <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/> |
| <param name="overwrite" type="boolean"/> |
| <param name="bufferSize" type="int"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@param permission Currently ignored.]]> |
| </doc> |
| </method> |
| <method name="open" return="org.apache.hadoop.fs.FSDataInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <param name="bufferSize" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="rename" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="org.apache.hadoop.fs.Path"/> |
| <param name="dst" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <param name="recursive" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="delete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[FileStatus for S3 file systems.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<p> |
| A {@link FileSystem} backed by <a href="http://aws.amazon.com/s3">Amazon S3</a>. |
| </p>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.s3.S3FileSystem --> |
| <!-- start class org.apache.hadoop.fs.s3.S3FileSystemException --> |
| <class name="S3FileSystemException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="S3FileSystemException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[Thrown when there is a fatal exception while using {@link S3FileSystem}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.s3.S3FileSystemException --> |
| <!-- start class org.apache.hadoop.fs.s3.VersionMismatchException --> |
| <class name="VersionMismatchException" extends="org.apache.hadoop.fs.s3.S3FileSystemException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="VersionMismatchException" type="java.lang.String, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[Thrown when Hadoop cannot read the version of the data stored |
| in {@link S3FileSystem}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.s3.VersionMismatchException --> |
| <doc> |
| <![CDATA[<p>A distributed implementation of {@link |
| org.apache.hadoop.fs.FileSystem} that uses <a href="http://aws.amazon.com/s3">Amazon S3</a>.</p> |
| |
| <p> |
| Files are stored in S3 as blocks (represented by |
| {@link org.apache.hadoop.fs.s3.Block}), which have an ID and a length. |
| Block metadata is stored in S3 as a small record (represented by |
| {@link org.apache.hadoop.fs.s3.INode}) using the URL-encoded |
| path string as a key. Inodes record the file type (regular file or directory) and the list of blocks. |
| This design makes it easy to seek to any given position in a file by reading the inode data to compute |
| which block to access, then using S3's support for |
| <a href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.2">HTTP Range</a> headers |
| to start streaming from the correct position. |
| Renames are also efficient since only the inode is moved (by a DELETE followed by a PUT since |
| S3 does not support renames). |
| </p> |
| <p> |
| For a single file <i>/dir1/file1</i> which takes two blocks of storage, the file structure in S3 |
| would be something like this: |
| </p> |
| <pre> |
| / |
| /dir1 |
| /dir1/file1 |
| block-6415776850131549260 |
| block-3026438247347758425 |
| </pre> |
| <p> |
| Inodes start with a leading <code>/</code>, while blocks are prefixed with <code>block-</code>. |
| </p>]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.fs.shell"> |
| <!-- start class org.apache.hadoop.fs.shell.Count --> |
| <class name="Count" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Count" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="matches" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="cmd" type="java.lang.String"/> |
| </method> |
| <method name="count" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.lang.String"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="out" type="java.io.PrintStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="NAME" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="USAGE" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DESCRIPTION" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Count the number of directories, files and bytes.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.fs.shell.Count --> |
| </package> |
| <package name="org.apache.hadoop.io"> |
| <!-- start class org.apache.hadoop.io.AbstractMapWritable --> |
| <class name="AbstractMapWritable" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <implements name="org.apache.hadoop.conf.Configurable"/> |
| <constructor name="AbstractMapWritable" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[constructor.]]> |
| </doc> |
| </constructor> |
| <method name="addToMap" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="clazz" type="java.lang.Class"/> |
| <doc> |
| <![CDATA[Add a Class to the maps if it is not already present.]]> |
| </doc> |
| </method> |
| <method name="getClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="id" type="byte"/> |
| <doc> |
| <![CDATA[@return the Class class for the specified id]]> |
| </doc> |
| </method> |
| <method name="getId" return="byte" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="clazz" type="java.lang.Class"/> |
| <doc> |
| <![CDATA[@return the id for the specified Class]]> |
| </doc> |
| </method> |
| <method name="copy" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="other" type="org.apache.hadoop.io.Writable"/> |
| <doc> |
| <![CDATA[Used by child copy constructors.]]> |
| </doc> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the conf]]> |
| </doc> |
| </method> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[@param conf the conf to set]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Abstract base class for MapWritable and SortedMapWritable |
| |
| Unlike org.apache.nutch.crawl.MapWritable, this class allows creation of |
| MapWritable<Writable, MapWritable> so the CLASS_TO_ID and ID_TO_CLASS |
| maps travel with the class instead of being static. |
| |
| Class ids range from 1 to 127 so there can be at most 127 distinct classes |
| in any specific map instance.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.AbstractMapWritable --> |
| <!-- start class org.apache.hadoop.io.ArrayFile --> |
| <class name="ArrayFile" extends="org.apache.hadoop.io.MapFile" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ArrayFile" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[A dense file-based mapping from integers to values.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.ArrayFile --> |
| <!-- start class org.apache.hadoop.io.ArrayFile.Reader --> |
| <class name="ArrayFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ArrayFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct an array reader for the named file.]]> |
| </doc> |
| </constructor> |
| <method name="seek" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="n" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Positions the reader before its <code>n</code>th value.]]> |
| </doc> |
| </method> |
| <method name="next" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read and return the next value in the file.]]> |
| </doc> |
| </method> |
| <method name="key" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the key associated with the most recent call to {@link |
| #seek(long)}, {@link #next(Writable)}, or {@link |
| #get(long,Writable)}.]]> |
| </doc> |
| </method> |
| <method name="get" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="n" type="long"/> |
| <param name="value" type="org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the <code>n</code>th value in the file.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Provide access to an existing array file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.ArrayFile.Reader --> |
| <!-- start class org.apache.hadoop.io.ArrayFile.Writer --> |
| <class name="ArrayFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named file for values of the named class.]]> |
| </doc> |
| </constructor> |
| <constructor name="ArrayFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named file for values of the named class.]]> |
| </doc> |
| </constructor> |
| <method name="append" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Append a value to the file.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Write a new array file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.ArrayFile.Writer --> |
| <!-- start class org.apache.hadoop.io.ArrayWritable --> |
| <class name="ArrayWritable" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="ArrayWritable" type="java.lang.Class" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="ArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="ArrayWritable" type="java.lang.String[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getValueClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="toStrings" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="toArray" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="values" type="org.apache.hadoop.io.Writable[]"/> |
| </method> |
| <method name="get" return="org.apache.hadoop.io.Writable[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A Writable for arrays containing instances of a class. The elements of this |
| writable must all be instances of the same class. If this writable will be |
| the input for a Reducer, you will need to create a subclass that sets the |
| value to be of the proper type. |
| |
| For example: |
| <code> |
| public class IntArrayWritable extends ArrayWritable { |
| public IntArrayWritable() { |
| super(IntWritable.class); |
| } |
| } |
| </code>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.ArrayWritable --> |
| <!-- start class org.apache.hadoop.io.BooleanWritable --> |
| <class name="BooleanWritable" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <constructor name="BooleanWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="BooleanWritable" type="boolean" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="boolean"/> |
| <doc> |
| <![CDATA[Set the value of the BooleanWritable]]> |
| </doc> |
| </method> |
| <method name="get" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the value of the BooleanWritable]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A WritableComparable for booleans.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.BooleanWritable --> |
| <!-- start class org.apache.hadoop.io.BooleanWritable.Comparator --> |
| <class name="BooleanWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="BooleanWritable.Comparator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| </method> |
| <doc> |
| <![CDATA[A Comparator optimized for BooleanWritable.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.BooleanWritable.Comparator --> |
| <!-- start class org.apache.hadoop.io.BytesWritable --> |
| <class name="BytesWritable" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <constructor name="BytesWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a zero-size sequence.]]> |
| </doc> |
| </constructor> |
| <constructor name="BytesWritable" type="byte[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a BytesWritable using the byte array as the initial value. |
| @param bytes This array becomes the backing storage for the object.]]> |
| </doc> |
| </constructor> |
| <method name="get" return="byte[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the data from the BytesWritable. |
| @return The data is only valid between 0 and getSize() - 1.]]> |
| </doc> |
| </method> |
| <method name="getSize" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the current size of the buffer.]]> |
| </doc> |
| </method> |
| <method name="setSize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="size" type="int"/> |
| <doc> |
| <![CDATA[Change the size of the buffer. The values in the old range are preserved |
| and any new values are undefined. The capacity is changed if it is |
| necessary. |
| @param size The new number of bytes]]> |
| </doc> |
| </method> |
| <method name="getCapacity" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the capacity, which is the maximum size that could be handled without |
| resizing the backing storage. |
| @return The number of bytes]]> |
| </doc> |
| </method> |
| <method name="setCapacity" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="new_cap" type="int"/> |
| <doc> |
| <![CDATA[Change the capacity of the backing storage. |
| The data is preserved. |
| @param new_cap The new capacity in bytes.]]> |
| </doc> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newData" type="org.apache.hadoop.io.BytesWritable"/> |
| <doc> |
| <![CDATA[Set the BytesWritable to the contents of the given newData. |
| @param newData the value to set this BytesWritable to.]]> |
| </doc> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newData" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <doc> |
| <![CDATA[Set the value to a copy of the given byte range |
| @param newData the new values to copy in |
| @param offset the offset in newData to start at |
| @param length the number of bytes to copy]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="right_obj" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Define the sort order of the BytesWritable. |
| @param right_obj The other bytes writable |
| @return Positive if left is bigger than right, 0 if they are equal, and |
| negative if left is smaller than right.]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="right_obj" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Are the two byte sequences equal?]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Generate the stream of bytes as hex pairs separated by ' '.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A byte sequence that is usable as a key or value. |
| It is resizable and distinguishes between the size of the sequence and |
| the current capacity. The hash function is the front of the md5 of the |
| buffer. The sort order is the same as memcmp.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.BytesWritable --> |
| <!-- start class org.apache.hadoop.io.BytesWritable.Comparator --> |
| <class name="BytesWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="BytesWritable.Comparator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| <doc> |
| <![CDATA[Compare the buffers in serialized form.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A Comparator optimized for BytesWritable.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.BytesWritable.Comparator --> |
| <!-- start interface org.apache.hadoop.io.Closeable --> |
| <interface name="Closeable" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Called after the last call to any other method on this object to free |
| and/or flush resources. Typical implementations do nothing.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[That which can be closed.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.Closeable --> |
| <!-- start class org.apache.hadoop.io.CompressedWritable --> |
| <class name="CompressedWritable" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="CompressedWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="ensureInflated" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Must be called by all methods which access fields to ensure that the data |
| has been uncompressed.]]> |
| </doc> |
| </method> |
| <method name="readFieldsCompressed" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Subclasses implement this instead of {@link #readFields(DataInput)}.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeCompressed" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Subclasses implement this instead of {@link #write(DataOutput)}.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A base-class for Writables which store themselves compressed and lazily |
| inflate on field access. This is useful for large objects whose fields are |
| not altered during a map or reduce operation: leaving the field data |
| compressed makes copying the instance from one file to another much |
| faster.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.CompressedWritable --> |
| <!-- start class org.apache.hadoop.io.DataInputBuffer --> |
| <class name="DataInputBuffer" extends="java.io.DataInputStream" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DataInputBuffer" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructs a new empty buffer.]]> |
| </doc> |
| </constructor> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="input" type="byte[]"/> |
| <param name="length" type="int"/> |
| <doc> |
| <![CDATA[Resets the data that the buffer reads.]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="input" type="byte[]"/> |
| <param name="start" type="int"/> |
| <param name="length" type="int"/> |
| <doc> |
| <![CDATA[Resets the data that the buffer reads.]]> |
| </doc> |
| </method> |
| <method name="getPosition" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the current position in the input.]]> |
| </doc> |
| </method> |
| <method name="getLength" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the length of the input.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A reusable {@link DataInput} implementation that reads from an in-memory |
| buffer. |
| |
| <p>This saves memory over creating a new DataInputStream and |
| ByteArrayInputStream each time data is read. |
| |
| <p>Typical usage is something like the following:<pre> |
| |
| DataInputBuffer buffer = new DataInputBuffer(); |
| while (... loop condition ...) { |
| byte[] data = ... get data ...; |
| int dataLength = ... get data length ...; |
| buffer.reset(data, dataLength); |
| ... read buffer using DataInput methods ... |
| } |
| </pre>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.DataInputBuffer --> |
| <!-- start class org.apache.hadoop.io.DataOutputBuffer --> |
| <class name="DataOutputBuffer" extends="java.io.DataOutputStream" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DataOutputBuffer" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructs a new empty buffer.]]> |
| </doc> |
| </constructor> |
| <method name="getData" return="byte[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the current contents of the buffer. |
| Data is only valid to {@link #getLength()}.]]> |
| </doc> |
| </method> |
| <method name="getLength" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the length of the valid data currently in the buffer.]]> |
| </doc> |
| </method> |
| <method name="reset" return="org.apache.hadoop.io.DataOutputBuffer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Resets the buffer to empty.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <param name="length" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Writes bytes from a DataInput directly into the buffer.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A reusable {@link DataOutput} implementation that writes to an in-memory |
| buffer. |
| |
| <p>This saves memory over creating a new DataOutputStream and |
| ByteArrayOutputStream each time data is written. |
| |
| <p>Typical usage is something like the following:<pre> |
| |
| DataOutputBuffer buffer = new DataOutputBuffer(); |
| while (... loop condition ...) { |
| buffer.reset(); |
| ... write buffer using DataOutput methods ... |
| byte[] data = buffer.getData(); |
| int dataLength = buffer.getLength(); |
| ... write data to its ultimate destination ... |
| } |
| </pre>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.DataOutputBuffer --> |
| <!-- start class org.apache.hadoop.io.DefaultStringifier --> |
| <class name="DefaultStringifier" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Stringifier<T>"/> |
| <constructor name="DefaultStringifier" type="org.apache.hadoop.conf.Configuration, java.lang.Class<T>" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="fromString" return="T" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="obj" type="T"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="store" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="item" type="K"/> |
| <param name="keyName" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Stores the item in the configuration with the given keyName. |
| |
| @param <K> the class of the item |
| @param conf the configuration to store |
| @param item the object to be stored |
| @param keyName the name of the key to use |
| @throws IOException : forwards Exceptions from the underlying |
| {@link Serialization} classes.]]> |
| </doc> |
| </method> |
| <method name="load" return="K" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="keyName" type="java.lang.String"/> |
| <param name="itemClass" type="java.lang.Class<K>"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Restores the object from the configuration. |
| |
| @param <K> the class of the item |
| @param conf the configuration to use |
| @param keyName the name of the key to use |
| @param itemClass the class of the item |
| @return restored object |
| @throws IOException : forwards Exceptions from the underlying |
| {@link Serialization} classes.]]> |
| </doc> |
| </method> |
| <method name="storeArray" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="items" type="K[]"/> |
| <param name="keyName" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Stores the array of items in the configuration with the given keyName. |
| |
| @param <K> the class of the item |
| @param conf the configuration to use |
| @param items the objects to be stored |
| @param keyName the name of the key to use |
| @throws IndexOutOfBoundsException if the items array is empty |
| @throws IOException : forwards Exceptions from the underlying |
| {@link Serialization} classes.]]> |
| </doc> |
| </method> |
| <method name="loadArray" return="K[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="keyName" type="java.lang.String"/> |
| <param name="itemClass" type="java.lang.Class<K>"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Restores the array of objects from the configuration. |
| |
| @param <K> the class of the item |
| @param conf the configuration to use |
| @param keyName the name of the key to use |
| @param itemClass the class of the item |
| @return restored object |
| @throws IOException : forwards Exceptions from the underlying |
| {@link Serialization} classes.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[DefaultStringifier is the default implementation of the {@link Stringifier} |
| interface which stringifies the objects using base64 encoding of the |
| serialized version of the objects. The {@link Serializer} and |
| {@link Deserializer} are obtained from the {@link SerializationFactory}. |
| <br> |
| DefaultStringifier offers convenience methods to store/load objects to/from |
| the configuration. |
| |
| @param <T> the class of the objects to stringify]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.DefaultStringifier --> |
| <!-- start class org.apache.hadoop.io.FloatWritable --> |
| <class name="FloatWritable" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <constructor name="FloatWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="FloatWritable" type="float" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="float"/> |
| <doc> |
| <![CDATA[Set the value of this FloatWritable.]]> |
| </doc> |
| </method> |
| <method name="get" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the value of this FloatWritable.]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Returns true iff <code>o</code> is a FloatWritable with the same value.]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Compares two FloatWritables.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A WritableComparable for floats.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.FloatWritable --> |
| <!-- start class org.apache.hadoop.io.FloatWritable.Comparator --> |
| <class name="FloatWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="FloatWritable.Comparator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| </method> |
| <doc> |
| <![CDATA[A Comparator optimized for FloatWritable.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.FloatWritable.Comparator --> |
| <!-- start class org.apache.hadoop.io.GenericWritable --> |
| <class name="GenericWritable" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <implements name="org.apache.hadoop.conf.Configurable"/> |
| <constructor name="GenericWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="obj" type="org.apache.hadoop.io.Writable"/> |
| <doc> |
| <![CDATA[Set the instance that is wrapped. |
| |
| @param obj]]> |
| </doc> |
| </method> |
| <method name="get" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the wrapped instance.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getTypes" return="java.lang.Class[]" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return all classes that may be wrapped. Subclasses should implement this |
| to return a constant array of classes.]]> |
| </doc> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| </method> |
| <doc> |
| <![CDATA[A wrapper for Writable instances. |
| <p> |
| When two sequence files, which have same Key type but different Value |
| types, are mapped out to reduce, multiple Value types are not allowed. |
| In this case, this class can help you wrap instances with different types. |
| </p> |
| |
| <p> |
| Compared with <code>ObjectWritable</code>, this class is much more efficient, |
| because <code>ObjectWritable</code> will append the class declaration as a String |
| into the output file in every Key-Value pair. |
| </p> |
| |
| <p> |
| Generic Writable implements {@link Configurable} interface, so that it will be |
| configured by the framework. The configuration is passed to the wrapped objects |
| implementing {@link Configurable} interface <i>before deserialization</i>. |
| </p> |
| |
| How to use it: <br> |
| 1. Write your own class, such as GenericObject, which extends GenericWritable.<br> |
| 2. Implement the abstract method <code>getTypes()</code> to define |
| the classes which will be wrapped in GenericObject in your application. |
| Attention: the classes defined in the <code>getTypes()</code> method must |
| implement the <code>Writable</code> interface. |
| <br><br> |
| |
| The code looks like this: |
| <blockquote><pre> |
| public class GenericObject extends GenericWritable { |
| |
| private static Class[] CLASSES = { |
| ClassType1.class, |
| ClassType2.class, |
| ClassType3.class, |
| }; |
| |
| protected Class[] getTypes() { |
| return CLASSES; |
| } |
| |
| } |
| </pre></blockquote> |
| |
| @since Nov 8, 2006]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.GenericWritable --> |
| <!-- start class org.apache.hadoop.io.InputBuffer --> |
| <class name="InputBuffer" extends="java.io.FilterInputStream" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="InputBuffer" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructs a new empty buffer.]]> |
| </doc> |
| </constructor> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="input" type="byte[]"/> |
| <param name="length" type="int"/> |
| <doc> |
| <![CDATA[Resets the data that the buffer reads.]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="input" type="byte[]"/> |
| <param name="start" type="int"/> |
| <param name="length" type="int"/> |
| <doc> |
| <![CDATA[Resets the data that the buffer reads.]]> |
| </doc> |
| </method> |
| <method name="getPosition" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the current position in the input.]]> |
| </doc> |
| </method> |
| <method name="getLength" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the length of the input.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A reusable {@link InputStream} implementation that reads from an in-memory |
| buffer. |
| |
| <p>This saves memory over creating a new InputStream and |
| ByteArrayInputStream each time data is read. |
| |
| <p>Typical usage is something like the following:<pre> |
| |
| InputBuffer buffer = new InputBuffer(); |
| while (... loop condition ...) { |
| byte[] data = ... get data ...; |
| int dataLength = ... get data length ...; |
| buffer.reset(data, dataLength); |
| ... read buffer using InputStream methods ... |
| } |
| </pre> |
| @see DataInputBuffer |
| @see OutputBuffer]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.InputBuffer --> |
| <!-- start class org.apache.hadoop.io.IntWritable --> |
| <class name="IntWritable" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <constructor name="IntWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="IntWritable" type="int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="int"/> |
| <doc> |
| <![CDATA[Set the value of this IntWritable.]]> |
| </doc> |
| </method> |
| <method name="get" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the value of this IntWritable.]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Returns true iff <code>o</code> is an IntWritable with the same value.]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Compares two IntWritables.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A WritableComparable for ints.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.IntWritable --> |
| <!-- start class org.apache.hadoop.io.IntWritable.Comparator --> |
| <class name="IntWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="IntWritable.Comparator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| </method> |
| <doc> |
| <![CDATA[A Comparator optimized for IntWritable.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.IntWritable.Comparator --> |
| <!-- start class org.apache.hadoop.io.IOUtils --> |
| <class name="IOUtils" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="IOUtils" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="copyBytes" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <param name="out" type="java.io.OutputStream"/> |
| <param name="buffSize" type="int"/> |
| <param name="close" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Copies from one stream to another. |
| @param in InputStream to read from |
| @param out OutputStream to write to |
| @param buffSize the size of the buffer |
| @param close whether or not close the InputStream and |
| OutputStream at the end. The streams are closed in the finally clause.]]> |
| </doc> |
| </method> |
| <method name="copyBytes" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <param name="out" type="java.io.OutputStream"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Copies from one stream to another. <strong>closes the input and output streams |
| at the end</strong>. |
| @param in InputStream to read from |
| @param out OutputStream to write to |
| @param conf the Configuration object]]> |
| </doc> |
| </method> |
| <method name="copyBytes" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <param name="out" type="java.io.OutputStream"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="close" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Copies from one stream to another. |
| @param in InputStream to read from |
| @param out OutputStream to write to |
| @param conf the Configuration object |
| @param close whether or not close the InputStream and |
| OutputStream at the end. The streams are closed in the finally clause.]]> |
| </doc> |
| </method> |
| <method name="readFully" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <param name="buf" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reads len bytes in a loop. |
| @param in The InputStream to read from |
| @param buf The buffer to fill |
| @param off offset from the buffer |
| @param len the length of bytes to read |
| @throws IOException if it could not read requested number of bytes |
| for any reason (including EOF)]]> |
| </doc> |
| </method> |
| <method name="skipFully" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <param name="len" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Similar to readFully(). Skips bytes in a loop. |
| @param in The InputStream to skip bytes from |
| @param len number of bytes to skip. |
| @throws IOException if it could not skip requested number of bytes |
| for any reason (including EOF)]]> |
| </doc> |
| </method> |
| <method name="closeStream" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="java.io.Closeable"/> |
| <doc> |
| <![CDATA[Closes the stream ignoring {@link IOException} |
| @param stream the Stream to close]]> |
| </doc> |
| </method> |
| <method name="closeSocket" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="sock" type="java.net.Socket"/> |
| <doc> |
| <![CDATA[Closes the socket ignoring {@link IOException} |
| @param sock the Socket to close]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A utility class for I/O related functionality.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.IOUtils --> |
| <!-- start class org.apache.hadoop.io.IOUtils.NullOutputStream --> |
| <class name="IOUtils.NullOutputStream" extends="java.io.OutputStream" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="IOUtils.NullOutputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[/dev/null of OutputStreams.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.IOUtils.NullOutputStream --> |
| <!-- start class org.apache.hadoop.io.LongWritable --> |
| <class name="LongWritable" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <constructor name="LongWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="LongWritable" type="long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="long"/> |
| <doc> |
| <![CDATA[Set the value of this LongWritable.]]> |
| </doc> |
| </method> |
| <method name="get" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the value of this LongWritable.]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Returns true iff <code>o</code> is a LongWritable with the same value.]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Compares two LongWritables.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A WritableComparable for longs.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.LongWritable --> |
| <!-- start class org.apache.hadoop.io.LongWritable.Comparator --> |
| <class name="LongWritable.Comparator" extends="org.apache.hadoop.io.WritableComparator" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="LongWritable.Comparator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| </method> |
| <doc> |
| <![CDATA[A Comparator optimized for LongWritable.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.LongWritable.Comparator --> |
| <!-- start class org.apache.hadoop.io.LongWritable.DecreasingComparator --> |
| <class name="LongWritable.DecreasingComparator" extends="org.apache.hadoop.io.LongWritable.Comparator" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="LongWritable.DecreasingComparator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="a" type="org.apache.hadoop.io.WritableComparable"/> |
| <param name="b" type="org.apache.hadoop.io.WritableComparable"/> |
| </method> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| </method> |
| <doc> |
| <![CDATA[A decreasing Comparator optimized for LongWritable.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.LongWritable.DecreasingComparator --> |
| <!-- start class org.apache.hadoop.io.MapFile --> |
| <class name="MapFile" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MapFile" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="rename" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="oldName" type="java.lang.String"/> |
| <param name="newName" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Renames an existing map directory.]]> |
| </doc> |
| </method> |
| <method name="delete" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="name" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Deletes the named map file.]]> |
| </doc> |
| </method> |
| <method name="fix" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="dir" type="org.apache.hadoop.fs.Path"/> |
| <param name="keyClass" type="java.lang.Class"/> |
| <param name="valueClass" type="java.lang.Class"/> |
| <param name="dryrun" type="boolean"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[This method attempts to fix a corrupt MapFile by re-creating its index. |
| @param fs filesystem |
| @param dir directory containing the MapFile data and index |
| @param keyClass key class (has to be a subclass of Writable) |
| @param valueClass value class (has to be a subclass of Writable) |
| @param dryrun do not perform any changes, just report what needs to be done |
| @return number of valid entries in this MapFile, or -1 if no fixing was needed |
| @throws Exception]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| </method> |
| <field name="INDEX_FILE_NAME" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The name of the index file.]]> |
| </doc> |
| </field> |
| <field name="DATA_FILE_NAME" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The name of the data file.]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[A file-based map from keys to values. |
| |
| <p>A map is a directory containing two files, the <code>data</code> file, |
| containing all keys and values in the map, and a smaller <code>index</code> |
| file, containing a fraction of the keys. The fraction is determined by |
| {@link Writer#getIndexInterval()}. |
| |
| <p>The index file is read entirely into memory. Thus key implementations |
| should try to keep themselves small. |
| |
| <p>Map files are created by adding entries in-order. To maintain a large |
| database, perform updates by copying the previous version of a database and |
| merging in a sorted change list, to create a new version of the database in |
| a new file. Sorting large change lists can be done with {@link |
| SequenceFile.Sorter}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.MapFile --> |
| <!-- start class org.apache.hadoop.io.MapFile.Reader --> |
| <class name="MapFile.Reader" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a map reader for the named map.]]> |
| </doc> |
| </constructor> |
| <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a map reader for the named map using the named comparator.]]> |
| </doc> |
| </constructor> |
| <constructor name="MapFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration, boolean" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Hook to allow subclasses to defer opening streams until further |
| initialization is complete. |
| @see #createDataFileReader(FileSystem, Path, Configuration)]]> |
| </doc> |
| </constructor> |
| <method name="getKeyClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the class of keys in this file.]]> |
| </doc> |
| </method> |
| <method name="getValueClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the class of values in this file.]]> |
| </doc> |
| </method> |
| <method name="open" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="dirName" type="java.lang.String"/> |
| <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createDataFileReader" return="org.apache.hadoop.io.SequenceFile.Reader" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="dataFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Override this method to specialize the type of |
| {@link SequenceFile.Reader} returned.]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Re-positions the reader before its first key.]]> |
| </doc> |
| </method> |
| <method name="midKey" return="org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the key at approximately the middle of the file. |
| |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="finalKey" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reads the final key from the file. |
| |
| @param key key to read into]]> |
| </doc> |
| </method> |
| <method name="seek" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Positions the reader at the named key, or if none such exists, at the |
| first entry after the named key. Returns true iff the named key exists |
| in this map.]]> |
| </doc> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.WritableComparable"/> |
| <param name="val" type="org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read the next key/value pair in the map into <code>key</code> and |
| <code>val</code>. Returns true if such a pair exists and false when at |
| the end of the map]]> |
| </doc> |
| </method> |
| <method name="get" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.WritableComparable"/> |
| <param name="val" type="org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the value for the named key, or null if none exists.]]> |
| </doc> |
| </method> |
| <method name="getClosest" return="org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.WritableComparable"/> |
| <param name="val" type="org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Finds the record that is the closest match to the specified key. |
| Returns <code>key</code> or, if it does not exist, the first entry |
| after the named key. |
| |
| @param key - key that we're trying to find |
| @param val - data value if key is found |
| @return - the key that was the closest match or null if eof.]]> |
| </doc> |
| </method> |
| <method name="getClosest" return="org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.WritableComparable"/> |
| <param name="val" type="org.apache.hadoop.io.Writable"/> |
| <param name="before" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Finds the record that is the closest match to the specified key. |
| |
| @param key - key that we're trying to find |
| @param val - data value if key is found |
| @param before - IF true, and <code>key</code> does not exist, return |
| the first entry that falls just before the <code>key</code>. Otherwise, |
| return the record that sorts just after. |
| @return - the key that was the closest match or null if eof.]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Close the map.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Provide access to an existing map.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.MapFile.Reader --> |
| <!-- start class org.apache.hadoop.io.MapFile.Writer --> |
| <class name="MapFile.Writer" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named map for keys of the named class.]]> |
| </doc> |
| </constructor> |
| <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named map for keys of the named class.]]> |
| </doc> |
| </constructor> |
| <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named map for keys of the named class.]]> |
| </doc> |
| </constructor> |
| <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named map for keys of the named class.]]> |
| </doc> |
| </constructor> |
| <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named map using the named key comparator.]]> |
| </doc> |
| </constructor> |
| <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named map using the named key comparator.]]> |
| </doc> |
| </constructor> |
| <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.util.Progressable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named map using the named key comparator.]]> |
| </doc> |
| </constructor> |
| <constructor name="MapFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType, org.apache.hadoop.io.compress.CompressionCodec, org.apache.hadoop.util.Progressable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named map using the named key comparator.]]> |
| </doc> |
| </constructor> |
| <method name="getIndexInterval" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The number of entries that are added before an index entry is added.]]> |
| </doc> |
| </method> |
| <method name="setIndexInterval" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="interval" type="int"/> |
| <doc> |
| <![CDATA[Sets the index interval. |
| @see #getIndexInterval()]]> |
| </doc> |
| </method> |
| <method name="setIndexInterval" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="interval" type="int"/> |
| <doc> |
| <![CDATA[Sets the index interval and stores it in conf |
| @see #getIndexInterval()]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Close the map.]]> |
| </doc> |
| </method> |
| <method name="append" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.WritableComparable"/> |
| <param name="val" type="org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Append a key/value pair to the map. The key must be greater or equal |
| to the previous key added to the map.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Writes a new map.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.MapFile.Writer --> |
| <!-- start class org.apache.hadoop.io.MapWritable --> |
| <class name="MapWritable" extends="org.apache.hadoop.io.AbstractMapWritable" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.util.Map<org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable>"/> |
| <constructor name="MapWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Default constructor.]]> |
| </doc> |
| </constructor> |
| <constructor name="MapWritable" type="org.apache.hadoop.io.MapWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Copy constructor. |
| |
| @param other the map to copy from]]> |
| </doc> |
| </constructor> |
| <method name="clear" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="containsKey" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="containsValue" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="entrySet" return="java.util.Set<java.util.Map.Entry<org.apache.hadoop.io.Writable, org.apache.hadoop.io.Writable>>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="get" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="isEmpty" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="keySet" return="java.util.Set<org.apache.hadoop.io.Writable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="put" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Writable"/> |
| <param name="value" type="org.apache.hadoop.io.Writable"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="putAll" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="t" type="java.util.Map<? extends org.apache.hadoop.io.Writable, ? extends org.apache.hadoop.io.Writable>"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="remove" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="size" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="values" return="java.util.Collection<org.apache.hadoop.io.Writable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A Writable Map.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.MapWritable --> |
| <!-- start class org.apache.hadoop.io.MD5Hash --> |
| <class name="MD5Hash" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <constructor name="MD5Hash" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructs an MD5Hash.]]> |
| </doc> |
| </constructor> |
| <constructor name="MD5Hash" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructs an MD5Hash from a hex string.]]> |
| </doc> |
| </constructor> |
| <constructor name="MD5Hash" type="byte[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructs an MD5Hash with a specified value.]]> |
| </doc> |
| </constructor> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="read" return="org.apache.hadoop.io.MD5Hash" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Constructs, reads and returns an instance.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="that" type="org.apache.hadoop.io.MD5Hash"/> |
| <doc> |
| <![CDATA[Copy the contents of another instance into this instance.]]> |
| </doc> |
| </method> |
| <method name="getDigest" return="byte[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the digest bytes.]]> |
| </doc> |
| </method> |
| <method name="digest" return="org.apache.hadoop.io.MD5Hash" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="data" type="byte[]"/> |
| <doc> |
| <![CDATA[Construct a hash value for a byte array.]]> |
| </doc> |
| </method> |
| <method name="digest" return="org.apache.hadoop.io.MD5Hash" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="data" type="byte[]"/> |
| <param name="start" type="int"/> |
| <param name="len" type="int"/> |
| <doc> |
| <![CDATA[Construct a hash value for a byte array.]]> |
| </doc> |
| </method> |
| <method name="digest" return="org.apache.hadoop.io.MD5Hash" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="string" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Construct a hash value for a String.]]> |
| </doc> |
| </method> |
| <method name="digest" return="org.apache.hadoop.io.MD5Hash" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="utf8" type="org.apache.hadoop.io.UTF8"/> |
| <doc> |
| <![CDATA[Construct a hash value for a String.]]> |
| </doc> |
| </method> |
| <method name="halfDigest" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a half-sized version of this MD5. Fits in a long]]> |
| </doc> |
| </method> |
| <method name="quarterDigest" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return a 32-bit digest of the MD5. |
| @return the first 4 bytes of the md5]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Returns true iff <code>o</code> is an MD5Hash whose digest contains the |
| same values.]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns a hash code value for this object. |
| Only uses the first 4 bytes, since md5s are evenly distributed.]]> |
| </doc> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Compares this object with the specified object for order.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns a string representation of this object.]]> |
| </doc> |
| </method> |
| <method name="setDigest" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="hex" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Sets the digest value from a hex string.]]> |
| </doc> |
| </method> |
| <field name="MD5_LEN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[A Writable for MD5 hash values.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.MD5Hash --> |
| <!-- start class org.apache.hadoop.io.MD5Hash.Comparator --> |
| <class name="MD5Hash.Comparator" extends="org.apache.hadoop.io.WritableComparator" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MD5Hash.Comparator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| </method> |
| <doc> |
| <![CDATA[A WritableComparator optimized for MD5Hash keys.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.MD5Hash.Comparator --> |
| <!-- start class org.apache.hadoop.io.MultipleIOException --> |
| <class name="MultipleIOException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getExceptions" return="java.util.List<java.io.IOException>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the underlying exceptions]]> |
| </doc> |
| </method> |
| <method name="createIOException" return="java.io.IOException" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="exceptions" type="java.util.List<java.io.IOException>"/> |
| <doc> |
| <![CDATA[A convenient method to create an {@link IOException}.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Encapsulate a list of {@link IOException} into an {@link IOException}]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.MultipleIOException --> |
| <!-- start class org.apache.hadoop.io.NullWritable --> |
| <class name="NullWritable" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <method name="get" return="org.apache.hadoop.io.NullWritable" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the single instance of this class.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="java.lang.Object"/> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="java.lang.Object"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[Singleton Writable with no data.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.NullWritable --> |
| <!-- start class org.apache.hadoop.io.ObjectWritable --> |
| <class name="ObjectWritable" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <implements name="org.apache.hadoop.conf.Configurable"/> |
| <constructor name="ObjectWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="ObjectWritable" type="java.lang.Object" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="ObjectWritable" type="java.lang.Class, java.lang.Object" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="get" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the instance, or null if none.]]> |
| </doc> |
| </method> |
| <method name="getDeclaredClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the class this is meant to be.]]> |
| </doc> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="instance" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Reset the instance.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeObject" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <param name="instance" type="java.lang.Object"/> |
| <param name="declaredClass" type="java.lang.Class"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write a {@link Writable}, {@link String}, primitive type, or an array of |
| the preceding.]]> |
| </doc> |
| </method> |
| <method name="readObject" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of |
| the preceding.]]> |
| </doc> |
| </method> |
| <method name="readObject" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <param name="objectWritable" type="org.apache.hadoop.io.ObjectWritable"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a {@link Writable}, {@link String}, primitive type, or an array of |
| the preceding.]]> |
| </doc> |
| </method> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
<![CDATA[A polymorphic Writable that writes an instance with its class name.
| Handles arrays, strings and primitive types without a Writable wrapper.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.ObjectWritable --> |
| <!-- start class org.apache.hadoop.io.OutputBuffer --> |
| <class name="OutputBuffer" extends="java.io.FilterOutputStream" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="OutputBuffer" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructs a new empty buffer.]]> |
| </doc> |
| </constructor> |
| <method name="getData" return="byte[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the current contents of the buffer. |
| Data is only valid to {@link #getLength()}.]]> |
| </doc> |
| </method> |
| <method name="getLength" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the length of the valid data currently in the buffer.]]> |
| </doc> |
| </method> |
| <method name="reset" return="org.apache.hadoop.io.OutputBuffer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Resets the buffer to empty.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <param name="length" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Writes bytes from a InputStream directly into the buffer.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A reusable {@link OutputStream} implementation that writes to an in-memory |
| buffer. |
| |
| <p>This saves memory over creating a new OutputStream and |
| ByteArrayOutputStream each time data is written. |
| |
| <p>Typical usage is something like the following:<pre> |
| |
| OutputBuffer buffer = new OutputBuffer(); |
| while (... loop condition ...) { |
| buffer.reset(); |
| ... write buffer using OutputStream methods ... |
| byte[] data = buffer.getData(); |
| int dataLength = buffer.getLength(); |
| ... write data to its ultimate destination ... |
| } |
| </pre> |
| @see DataOutputBuffer |
| @see InputBuffer]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.OutputBuffer --> |
| <!-- start interface org.apache.hadoop.io.RawComparator --> |
| <interface name="RawComparator" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.util.Comparator<T>"/> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| </method> |
| <doc> |
| <![CDATA[<p> |
| A {@link Comparator} that operates directly on byte representations of |
| objects. |
| </p> |
| @param <T> |
| @see DeserializerComparator]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.RawComparator --> |
| <!-- start class org.apache.hadoop.io.SequenceFile --> |
| <class name="SequenceFile" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="Use {@link org.apache.hadoop.mapred.JobConf#getMapOutputCompressionType()} |
| to get {@link CompressionType} for intermediate map-outputs or |
| {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)} |
| to get {@link CompressionType} for job-outputs."> |
| <param name="job" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Get the compression type for the reduce outputs |
| @param job the job config to look in |
| @return the kind of compression to use |
| @deprecated Use {@link org.apache.hadoop.mapred.JobConf#getMapOutputCompressionType()} |
| to get {@link CompressionType} for intermediate map-outputs or |
| {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#getOutputCompressionType(org.apache.hadoop.mapred.JobConf)} |
| to get {@link CompressionType} for job-outputs.]]> |
| </doc> |
| </method> |
| <method name="setCompressionType" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="Use the one of the many SequenceFile.createWriter methods to specify |
| the {@link CompressionType} while creating the {@link SequenceFile} or |
| {@link org.apache.hadoop.mapred.JobConf#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)} |
| to specify the {@link CompressionType} for intermediate map-outputs or |
| {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)} |
 to specify the {@link CompressionType} for job-outputs.">
| <param name="job" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="val" type="org.apache.hadoop.io.SequenceFile.CompressionType"/> |
| <doc> |
| <![CDATA[Set the compression type for sequence files. |
| @param job the configuration to modify |
| @param val the new compression type (none, block, record) |
| @deprecated Use the one of the many SequenceFile.createWriter methods to specify |
| the {@link CompressionType} while creating the {@link SequenceFile} or |
| {@link org.apache.hadoop.mapred.JobConf#setMapOutputCompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)} |
| to specify the {@link CompressionType} for intermediate map-outputs or |
| {@link org.apache.hadoop.mapred.SequenceFileOutputFormat#setOutputCompressionType(org.apache.hadoop.mapred.JobConf, org.apache.hadoop.io.SequenceFile.CompressionType)} |
 to specify the {@link CompressionType} for job-outputs.]]>
| </doc> |
| </method> |
| <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="name" type="org.apache.hadoop.fs.Path"/> |
| <param name="keyClass" type="java.lang.Class"/> |
| <param name="valClass" type="java.lang.Class"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct the preferred type of SequenceFile Writer. |
| @param fs The configured filesystem. |
| @param conf The configuration. |
| @param name The name of the file. |
| @param keyClass The 'key' type. |
| @param valClass The 'value' type. |
| @return Returns the handle to the constructed SequenceFile Writer. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="name" type="org.apache.hadoop.fs.Path"/> |
| <param name="keyClass" type="java.lang.Class"/> |
| <param name="valClass" type="java.lang.Class"/> |
| <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct the preferred type of SequenceFile Writer. |
| @param fs The configured filesystem. |
| @param conf The configuration. |
| @param name The name of the file. |
| @param keyClass The 'key' type. |
| @param valClass The 'value' type. |
| @param compressionType The compression type. |
| @return Returns the handle to the constructed SequenceFile Writer. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="name" type="org.apache.hadoop.fs.Path"/> |
| <param name="keyClass" type="java.lang.Class"/> |
| <param name="valClass" type="java.lang.Class"/> |
| <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct the preferred type of SequenceFile Writer. |
| @param fs The configured filesystem. |
| @param conf The configuration. |
| @param name The name of the file. |
| @param keyClass The 'key' type. |
| @param valClass The 'value' type. |
| @param compressionType The compression type. |
| @param progress The Progressable object to track progress. |
| @return Returns the handle to the constructed SequenceFile Writer. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="name" type="org.apache.hadoop.fs.Path"/> |
| <param name="keyClass" type="java.lang.Class"/> |
| <param name="valClass" type="java.lang.Class"/> |
| <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/> |
| <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct the preferred type of SequenceFile Writer. |
| @param fs The configured filesystem. |
| @param conf The configuration. |
| @param name The name of the file. |
| @param keyClass The 'key' type. |
| @param valClass The 'value' type. |
| @param compressionType The compression type. |
| @param codec The compression codec. |
| @return Returns the handle to the constructed SequenceFile Writer. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="name" type="org.apache.hadoop.fs.Path"/> |
| <param name="keyClass" type="java.lang.Class"/> |
| <param name="valClass" type="java.lang.Class"/> |
| <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/> |
| <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct the preferred type of SequenceFile Writer. |
| @param fs The configured filesystem. |
| @param conf The configuration. |
| @param name The name of the file. |
| @param keyClass The 'key' type. |
| @param valClass The 'value' type. |
| @param compressionType The compression type. |
| @param codec The compression codec. |
| @param progress The Progressable object to track progress. |
| @param metadata The metadata of the file. |
| @return Returns the handle to the constructed SequenceFile Writer. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="name" type="org.apache.hadoop.fs.Path"/> |
| <param name="keyClass" type="java.lang.Class"/> |
| <param name="valClass" type="java.lang.Class"/> |
| <param name="bufferSize" type="int"/> |
| <param name="replication" type="short"/> |
| <param name="blockSize" type="long"/> |
| <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/> |
| <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct the preferred type of SequenceFile Writer. |
| @param fs The configured filesystem. |
| @param conf The configuration. |
| @param name The name of the file. |
| @param keyClass The 'key' type. |
| @param valClass The 'value' type. |
 @param bufferSize buffer size for the underlying output stream.
| @param replication replication factor for the file. |
| @param blockSize block size for the file. |
| @param compressionType The compression type. |
| @param codec The compression codec. |
| @param progress The Progressable object to track progress. |
| @param metadata The metadata of the file. |
| @return Returns the handle to the constructed SequenceFile Writer. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="name" type="org.apache.hadoop.fs.Path"/> |
| <param name="keyClass" type="java.lang.Class"/> |
| <param name="valClass" type="java.lang.Class"/> |
| <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/> |
| <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct the preferred type of SequenceFile Writer. |
| @param fs The configured filesystem. |
| @param conf The configuration. |
| @param name The name of the file. |
| @param keyClass The 'key' type. |
| @param valClass The 'value' type. |
| @param compressionType The compression type. |
| @param codec The compression codec. |
| @param progress The Progressable object to track progress. |
| @return Returns the handle to the constructed SequenceFile Writer. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/> |
| <param name="keyClass" type="java.lang.Class"/> |
| <param name="valClass" type="java.lang.Class"/> |
| <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/> |
| <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/> |
| <param name="metadata" type="org.apache.hadoop.io.SequenceFile.Metadata"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer. |
| @param conf The configuration. |
| @param out The stream on top which the writer is to be constructed. |
| @param keyClass The 'key' type. |
| @param valClass The 'value' type. |
| @param compressionType The compression type. |
| @param codec The compression codec. |
| @param metadata The metadata of the file. |
| @return Returns the handle to the constructed SequenceFile Writer. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="createWriter" return="org.apache.hadoop.io.SequenceFile.Writer" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="out" type="org.apache.hadoop.fs.FSDataOutputStream"/> |
| <param name="keyClass" type="java.lang.Class"/> |
| <param name="valClass" type="java.lang.Class"/> |
| <param name="compressionType" type="org.apache.hadoop.io.SequenceFile.CompressionType"/> |
| <param name="codec" type="org.apache.hadoop.io.compress.CompressionCodec"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct the preferred type of 'raw' SequenceFile Writer. |
| @param conf The configuration. |
| @param out The stream on top which the writer is to be constructed. |
| @param keyClass The 'key' type. |
| @param valClass The 'value' type. |
| @param compressionType The compression type. |
| @param codec The compression codec. |
| @return Returns the handle to the constructed SequenceFile Writer. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <field name="SYNC_INTERVAL" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The number of bytes between sync points.]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[<code>SequenceFile</code>s are flat files consisting of binary key/value |
| pairs. |
| |
| <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and |
| {@link Sorter} classes for writing, reading and sorting respectively.</p> |
| |
| There are three <code>SequenceFile</code> <code>Writer</code>s based on the |
| {@link CompressionType} used to compress key/value pairs: |
| <ol> |
| <li> |
| <code>Writer</code> : Uncompressed records. |
| </li> |
| <li> |
| <code>RecordCompressWriter</code> : Record-compressed files, only compress |
| values. |
| </li> |
| <li> |
| <code>BlockCompressWriter</code> : Block-compressed files, both keys & |
| values are collected in 'blocks' |
| separately and compressed. The size of |
                                    the 'block' is configurable.
   </li>
 </ol>
| |
| <p>The actual compression algorithm used to compress key and/or values can be |
| specified by using the appropriate {@link CompressionCodec}.</p> |
| |
| <p>The recommended way is to use the static <tt>createWriter</tt> methods |
 provided by the <code>SequenceFile</code> to choose the preferred format.</p>
| |
| <p>The {@link Reader} acts as the bridge and can read any of the above |
| <code>SequenceFile</code> formats.</p> |
| |
| <h4 id="Formats">SequenceFile Formats</h4> |
| |
| <p>Essentially there are 3 different formats for <code>SequenceFile</code>s |
| depending on the <code>CompressionType</code> specified. All of them share a |
| <a href="#Header">common header</a> described below. |
| |
| <h5 id="Header">SequenceFile Header</h5> |
| <ul> |
| <li> |
| version - 3 bytes of magic header <b>SEQ</b>, followed by 1 byte of actual |
| version number (e.g. SEQ4 or SEQ6) |
| </li> |
| <li> |
 keyClassName - key class
| </li> |
| <li> |
| valueClassName - value class |
| </li> |
| <li> |
| compression - A boolean which specifies if compression is turned on for |
| keys/values in this file. |
| </li> |
| <li> |
| blockCompression - A boolean which specifies if block-compression is |
| turned on for keys/values in this file. |
| </li> |
| <li> |
| compression codec - <code>CompressionCodec</code> class which is used for |
| compression of keys and/or values (if compression is |
| enabled). |
| </li> |
| <li> |
| metadata - {@link Metadata} for this file. |
| </li> |
| <li> |
| sync - A sync marker to denote end of the header. |
| </li> |
| </ul> |
| |
 <h5 id="UncompressedFormat">Uncompressed SequenceFile Format</h5>
| <ul> |
| <li> |
| <a href="#Header">Header</a> |
| </li> |
| <li> |
| Record |
| <ul> |
| <li>Record length</li> |
| <li>Key length</li> |
| <li>Key</li> |
| <li>Value</li> |
| </ul> |
| </li> |
| <li> |
| A sync-marker every few <code>100</code> bytes or so. |
| </li> |
| </ul> |
| |
 <h5 id="RecordCompressedFormat">Record-Compressed SequenceFile Format</h5>
| <ul> |
| <li> |
| <a href="#Header">Header</a> |
| </li> |
| <li> |
| Record |
| <ul> |
| <li>Record length</li> |
| <li>Key length</li> |
| <li>Key</li> |
| <li><i>Compressed</i> Value</li> |
| </ul> |
| </li> |
| <li> |
| A sync-marker every few <code>100</code> bytes or so. |
| </li> |
| </ul> |
| |
 <h5 id="BlockCompressedFormat">Block-Compressed SequenceFile Format</h5>
| <ul> |
| <li> |
| <a href="#Header">Header</a> |
| </li> |
| <li> |
| Record <i>Block</i> |
| <ul> |
| <li>Compressed key-lengths block-size</li> |
| <li>Compressed key-lengths block</li> |
| <li>Compressed keys block-size</li> |
| <li>Compressed keys block</li> |
| <li>Compressed value-lengths block-size</li> |
| <li>Compressed value-lengths block</li> |
| <li>Compressed values block-size</li> |
| <li>Compressed values block</li> |
| </ul> |
| </li> |
| <li> |
| A sync-marker every few <code>100</code> bytes or so. |
| </li> |
| </ul> |
| |
| <p>The compressed blocks of key lengths and value lengths consist of the |
| actual lengths of individual keys/values encoded in ZeroCompressedInteger |
| format.</p> |
| |
| @see CompressionCodec]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.SequenceFile --> |
| <!-- start class org.apache.hadoop.io.SequenceFile.CompressionType --> |
| <class name="SequenceFile.CompressionType" extends="java.lang.Enum<org.apache.hadoop.io.SequenceFile.CompressionType>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.io.SequenceFile.CompressionType[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.io.SequenceFile.CompressionType" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <doc> |
| <![CDATA[The compression type used to compress key/value pairs in the |
| {@link SequenceFile}. |
| |
| @see SequenceFile.Writer]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.SequenceFile.CompressionType --> |
| <!-- start class org.apache.hadoop.io.SequenceFile.Metadata --> |
| <class name="SequenceFile.Metadata" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="SequenceFile.Metadata" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="SequenceFile.Metadata" type="java.util.TreeMap<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="get" return="org.apache.hadoop.io.Text" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="org.apache.hadoop.io.Text"/> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="org.apache.hadoop.io.Text"/> |
| <param name="value" type="org.apache.hadoop.io.Text"/> |
| </method> |
| <method name="getMetadata" return="java.util.TreeMap<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="org.apache.hadoop.io.SequenceFile.Metadata"/> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
 <![CDATA[The class encapsulating the metadata of a file.
| The metadata of a file is a list of attribute name/value |
| pairs of Text type.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.SequenceFile.Metadata --> |
| <!-- start class org.apache.hadoop.io.SequenceFile.Reader --> |
| <class name="SequenceFile.Reader" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.io.Closeable"/> |
| <constructor name="SequenceFile.Reader" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Open the named file.]]> |
| </doc> |
| </constructor> |
| <method name="openFile" return="org.apache.hadoop.fs.FSDataInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="file" type="org.apache.hadoop.fs.Path"/> |
| <param name="bufferSize" type="int"/> |
| <param name="length" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Override this method to specialize the type of |
| {@link FSDataInputStream} returned.]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Close the file.]]> |
| </doc> |
| </method> |
| <method name="getKeyClassName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the name of the key class.]]> |
| </doc> |
| </method> |
| <method name="getKeyClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the class of keys in this file.]]> |
| </doc> |
| </method> |
| <method name="getValueClassName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the name of the value class.]]> |
| </doc> |
| </method> |
| <method name="getValueClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the class of values in this file.]]> |
| </doc> |
| </method> |
| <method name="isCompressed" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns true if values are compressed.]]> |
| </doc> |
| </method> |
| <method name="isBlockCompressed" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns true if records are block-compressed.]]> |
| </doc> |
| </method> |
| <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the compression codec of data in this file.]]> |
| </doc> |
| </method> |
| <method name="getMetadata" return="org.apache.hadoop.io.SequenceFile.Metadata" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the metadata object of the file]]> |
| </doc> |
| </method> |
| <method name="getCurrentValue" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the 'value' corresponding to the last read 'key'. |
| @param val : The 'value' to be read. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read the next key in the file into <code>key</code>, skipping its |
| value. True if another entry exists, and false at end of file.]]> |
| </doc> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Writable"/> |
| <param name="val" type="org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read the next key/value pair in the file into <code>key</code> and |
| <code>val</code>. Returns true if such a pair exists and false when at |
| end of file]]> |
| </doc> |
| </method> |
| <method name="next" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}."> |
| <param name="buffer" type="org.apache.hadoop.io.DataOutputBuffer"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@deprecated Call {@link #nextRaw(DataOutputBuffer,SequenceFile.ValueBytes)}.]]> |
| </doc> |
| </method> |
| <method name="createValueBytes" return="org.apache.hadoop.io.SequenceFile.ValueBytes" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="nextRaw" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/> |
| <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read 'raw' records. |
| @param key - The buffer into which the key is read |
| @param val - The 'raw' value |
| @return Returns the total record length or -1 for end of file |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="nextRawKey" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.DataOutputBuffer"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read 'raw' keys. |
| @param key - The buffer into which the key is read |
| @return Returns the key length or -1 for end of file |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="nextRawValue" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read 'raw' values. |
| @param val - The 'raw' value |
| @return Returns the value length |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="seek" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Set the current byte position in the input file. |
| |
| <p>The position passed must be a position returned by {@link |
| SequenceFile.Writer#getLength()} when writing this file. To seek to an arbitrary |
| position, use {@link SequenceFile.Reader#sync(long)}.]]> |
| </doc> |
| </method> |
| <method name="sync" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Seek to the next sync mark past a given position.]]> |
| </doc> |
| </method> |
| <method name="syncSeen" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns true iff the previous call to next passed a sync mark.]]> |
| </doc> |
| </method> |
| <method name="getPosition" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the current byte position in the input file.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the name of the file.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Reads key/value pairs from a sequence-format file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.SequenceFile.Reader --> |
| <!-- start class org.apache.hadoop.io.SequenceFile.Sorter --> |
| <class name="SequenceFile.Sorter" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Sort and merge files containing the named classes.]]> |
| </doc> |
| </constructor> |
| <constructor name="SequenceFile.Sorter" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.io.RawComparator, java.lang.Class, java.lang.Class, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Sort and merge using an arbitrary {@link RawComparator}.]]> |
| </doc> |
| </constructor> |
| <method name="setFactor" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="factor" type="int"/> |
| <doc> |
| <![CDATA[Set the number of streams to merge at once.]]> |
| </doc> |
| </method> |
| <method name="getFactor" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the number of streams to merge at once.]]> |
| </doc> |
| </method> |
| <method name="setMemory" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="memory" type="int"/> |
| <doc> |
| <![CDATA[Set the total amount of buffer memory, in bytes.]]> |
| </doc> |
| </method> |
| <method name="getMemory" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the total amount of buffer memory, in bytes.]]> |
| </doc> |
| </method> |
| <method name="setProgressable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="progressable" type="org.apache.hadoop.util.Progressable"/> |
| <doc> |
| <![CDATA[Set the progressable object in order to report progress.]]> |
| </doc> |
| </method> |
| <method name="sort" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/> |
| <param name="outFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="deleteInput" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Perform a file sort from a set of input files into an output file. |
| @param inFiles the files to be sorted |
| @param outFile the sorted output file |
| @param deleteInput should the input files be deleted as they are read?]]> |
| </doc> |
| </method> |
| <method name="sortAndIterate" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/> |
| <param name="tempDir" type="org.apache.hadoop.fs.Path"/> |
| <param name="deleteInput" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Perform a file sort from a set of input files and return an iterator. |
| @param inFiles the files to be sorted |
| @param tempDir the directory where temp files are created during sort |
| @param deleteInput should the input files be deleted as they are read? |
| @return iterator the RawKeyValueIterator]]> |
| </doc> |
| </method> |
| <method name="sort" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="inFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="outFile" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The backwards compatible interface to sort. |
| @param inFile the input file to sort |
| @param outFile the sorted output file]]> |
| </doc> |
| </method> |
| <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="segments" type="java.util.List<org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor>"/> |
| <param name="tmpDir" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Merges the list of segments of type <code>SegmentDescriptor</code> |
| @param segments the list of SegmentDescriptors |
| @param tmpDir the directory to write temporary files into |
| @return RawKeyValueIterator |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="inNames" type="org.apache.hadoop.fs.Path[]"/> |
| <param name="deleteInputs" type="boolean"/> |
| <param name="tmpDir" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Merges the contents of files passed in Path[] using a max factor value |
| that is already set |
| @param inNames the array of path names |
| @param deleteInputs true if the input files should be deleted when |
| unnecessary |
| @param tmpDir the directory to write temporary files into |
@return RawKeyValueIterator over the merged key/value pairs
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="inNames" type="org.apache.hadoop.fs.Path[]"/> |
| <param name="deleteInputs" type="boolean"/> |
| <param name="factor" type="int"/> |
| <param name="tmpDir" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Merges the contents of files passed in Path[] |
| @param inNames the array of path names |
| @param deleteInputs true if the input files should be deleted when |
| unnecessary |
| @param factor the factor that will be used as the maximum merge fan-in |
| @param tmpDir the directory to write temporary files into |
@return RawKeyValueIterator over the merged key/value pairs
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="merge" return="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="inNames" type="org.apache.hadoop.fs.Path[]"/> |
| <param name="tempDir" type="org.apache.hadoop.fs.Path"/> |
| <param name="deleteInputs" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Merges the contents of files passed in Path[] |
| @param inNames the array of path names |
| @param tempDir the directory for creating temp files during merge |
| @param deleteInputs true if the input files should be deleted when |
| unnecessary |
@return RawKeyValueIterator over the merged key/value pairs
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="cloneFileAttributes" return="org.apache.hadoop.io.SequenceFile.Writer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="inputFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="outputFile" type="org.apache.hadoop.fs.Path"/> |
| <param name="prog" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
<![CDATA[Clones the attributes (like compression) of the input file and creates a
corresponding Writer
| @param inputFile the path of the input file whose attributes should be |
| cloned |
| @param outputFile the path of the output file |
| @param prog the Progressable to report status during the file write |
| @return Writer |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="writeFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="records" type="org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator"/> |
| <param name="writer" type="org.apache.hadoop.io.SequenceFile.Writer"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Writes records from RawKeyValueIterator into a file represented by the |
| passed writer |
| @param records the RawKeyValueIterator |
| @param writer the Writer created earlier |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="merge" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="inFiles" type="org.apache.hadoop.fs.Path[]"/> |
| <param name="outFile" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Merge the provided files. |
| @param inFiles the array of input path names |
| @param outFile the final output file |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Sorts key/value pairs in a sequence-format file. |
| |
| <p>For best performance, applications should make sure that the {@link |
| Writable#readFields(DataInput)} implementation of their keys is |
| very efficient. In particular, it should avoid allocating memory.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.SequenceFile.Sorter --> |
| <!-- start interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator --> |
| <interface name="SequenceFile.Sorter.RawKeyValueIterator" abstract="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Gets the current raw key |
| @return DataOutputBuffer |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getValue" return="org.apache.hadoop.io.SequenceFile.ValueBytes" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Gets the current raw value |
| @return ValueBytes |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Sets up the current key and value (for getKey and getValue) |
| @return true if there exists a key/value, false otherwise |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[closes the iterator so that the underlying streams can be closed |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getProgress" return="org.apache.hadoop.util.Progress" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Gets the Progress object; this has a float (0.0 - 1.0) |
| indicating the bytes processed by the iterator so far]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[The interface to iterate over raw keys/values of SequenceFiles.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator --> |
| <!-- start class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor --> |
| <class name="SequenceFile.Sorter.SegmentDescriptor" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.lang.Comparable"/> |
| <constructor name="SequenceFile.Sorter.SegmentDescriptor" type="long, long, org.apache.hadoop.fs.Path" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructs a segment |
| @param segmentOffset the offset of the segment in the file |
| @param segmentLength the length of the segment |
| @param segmentPathName the path name of the file containing the segment]]> |
| </doc> |
| </constructor> |
| <method name="doSync" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Do the sync checks]]> |
| </doc> |
| </method> |
| <method name="preserveInput" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="preserve" type="boolean"/> |
<doc>
<![CDATA[Sets whether the input files should be preserved (not deleted) when
they are no longer needed.
@param preserve if true, the input files are kept; if false, they may be
deleted once read]]>
</doc>
| </method> |
| <method name="shouldPreserveInput" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="nextRawKey" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Fills up the rawKey object with the key returned by the Reader |
| @return true if there is a key returned; false, otherwise |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="nextRawValue" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="rawValue" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Fills up the passed rawValue with the value corresponding to the key |
| read earlier |
| @param rawValue |
| @return the length of the value |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getKey" return="org.apache.hadoop.io.DataOutputBuffer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the stored rawKey]]> |
| </doc> |
| </method> |
| <method name="cleanup" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The default cleanup. Subclasses can override this with a custom |
| cleanup]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class defines a merge segment. This class can be subclassed to |
| provide a customized cleanup method implementation. In this |
| implementation, cleanup closes the file handle and deletes the file]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor --> |
| <!-- start interface org.apache.hadoop.io.SequenceFile.ValueBytes --> |
| <interface name="SequenceFile.ValueBytes" abstract="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="writeUncompressedBytes" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="outStream" type="java.io.DataOutputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Writes the uncompressed bytes to the outStream. |
| @param outStream : Stream to write uncompressed bytes into. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="writeCompressedBytes" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="outStream" type="java.io.DataOutputStream"/> |
| <exception name="IllegalArgumentException" type="java.lang.IllegalArgumentException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
<![CDATA[Write compressed bytes to outStream.
Note that it will NOT compress the bytes if they are not already compressed.
| @param outStream : Stream to write compressed bytes into.]]> |
| </doc> |
| </method> |
| <method name="getSize" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Size of stored data.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[The interface to 'raw' values of SequenceFiles.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.SequenceFile.ValueBytes --> |
| <!-- start class org.apache.hadoop.io.SequenceFile.Writer --> |
| <class name="SequenceFile.Writer" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.io.Closeable"/> |
| <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named file.]]> |
| </doc> |
| </constructor> |
| <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named file with write-progress reporter.]]> |
| </doc> |
| </constructor> |
| <constructor name="SequenceFile.Writer" type="org.apache.hadoop.fs.FileSystem, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, java.lang.Class, java.lang.Class, int, short, long, org.apache.hadoop.util.Progressable, org.apache.hadoop.io.SequenceFile.Metadata" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named file with write-progress reporter.]]> |
| </doc> |
| </constructor> |
| <method name="getKeyClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the class of keys in this file.]]> |
| </doc> |
| </method> |
| <method name="getValueClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the class of values in this file.]]> |
| </doc> |
| </method> |
| <method name="getCompressionCodec" return="org.apache.hadoop.io.compress.CompressionCodec" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the compression codec of data in this file.]]> |
| </doc> |
| </method> |
| <method name="sync" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[create a sync point]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Close the file.]]> |
| </doc> |
| </method> |
| <method name="append" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Writable"/> |
| <param name="val" type="org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Append a key/value pair.]]> |
| </doc> |
| </method> |
| <method name="append" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Object"/> |
| <param name="val" type="java.lang.Object"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Append a key/value pair.]]> |
| </doc> |
| </method> |
| <method name="appendRaw" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="keyData" type="byte[]"/> |
| <param name="keyOffset" type="int"/> |
| <param name="keyLength" type="int"/> |
| <param name="val" type="org.apache.hadoop.io.SequenceFile.ValueBytes"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the current length of the output file. |
| |
| <p>This always returns a synchronized position. In other words, |
| immediately after calling {@link SequenceFile.Reader#seek(long)} with a position |
| returned by this method, {@link SequenceFile.Reader#next(Writable)} may be called. However |
| the key may be earlier in the file than key last written when this |
| method was called (e.g., with block-compression, it may be the first key |
| in the block that was being written when this method was called).]]> |
| </doc> |
| </method> |
| <field name="keySerializer" type="org.apache.hadoop.io.serializer.Serializer" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="uncompressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="compressedValSerializer" type="org.apache.hadoop.io.serializer.Serializer" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Write key/value pairs to a sequence-format file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.SequenceFile.Writer --> |
| <!-- start class org.apache.hadoop.io.SetFile --> |
| <class name="SetFile" extends="org.apache.hadoop.io.MapFile" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SetFile" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[A file-based set of keys.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.SetFile --> |
| <!-- start class org.apache.hadoop.io.SetFile.Reader --> |
| <class name="SetFile.Reader" extends="org.apache.hadoop.io.MapFile.Reader" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a set reader for the named set.]]> |
| </doc> |
| </constructor> |
| <constructor name="SetFile.Reader" type="org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a set reader for the named set using the named comparator.]]> |
| </doc> |
| </constructor> |
| <method name="seek" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read the next key in a set into <code>key</code>. Returns |
| true if such a key exists and false when at the end of the set.]]> |
| </doc> |
| </method> |
| <method name="get" return="org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read the matching key from a set into <code>key</code>. |
| Returns <code>key</code>, or null if no match exists.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Provide access to an existing set file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.SetFile.Reader --> |
| <!-- start class org.apache.hadoop.io.SetFile.Writer --> |
| <class name="SetFile.Writer" extends="org.apache.hadoop.io.MapFile.Writer" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SetFile.Writer" type="org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class" |
| static="false" final="false" visibility="public" |
| deprecated="pass a Configuration too"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create the named set for keys of the named class. |
| @deprecated pass a Configuration too]]> |
| </doc> |
| </constructor> |
| <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, java.lang.Class, org.apache.hadoop.io.SequenceFile.CompressionType" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a set naming the element class and compression type.]]> |
| </doc> |
| </constructor> |
| <constructor name="SetFile.Writer" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem, java.lang.String, org.apache.hadoop.io.WritableComparator, org.apache.hadoop.io.SequenceFile.CompressionType" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a set naming the element comparator and compression type.]]> |
| </doc> |
| </constructor> |
| <method name="append" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Append a key to a set. The key must be strictly greater than the |
| previous key added to the set.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Write a new set file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.SetFile.Writer --> |
| <!-- start class org.apache.hadoop.io.SortedMapWritable --> |
| <class name="SortedMapWritable" extends="org.apache.hadoop.io.AbstractMapWritable" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.util.SortedMap<org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable>"/> |
| <constructor name="SortedMapWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[default constructor.]]> |
| </doc> |
| </constructor> |
| <constructor name="SortedMapWritable" type="org.apache.hadoop.io.SortedMapWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Copy constructor. |
| |
| @param other the map to copy from]]> |
| </doc> |
| </constructor> |
| <method name="comparator" return="java.util.Comparator<? super org.apache.hadoop.io.WritableComparable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="firstKey" return="org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="headMap" return="java.util.SortedMap<org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="lastKey" return="org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="subMap" return="java.util.SortedMap<org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/> |
| <param name="toKey" type="org.apache.hadoop.io.WritableComparable"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="tailMap" return="java.util.SortedMap<org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fromKey" type="org.apache.hadoop.io.WritableComparable"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="clear" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="containsKey" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="containsValue" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="entrySet" return="java.util.Set<java.util.Map.Entry<org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable>>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="get" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="isEmpty" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="keySet" return="java.util.Set<org.apache.hadoop.io.WritableComparable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="put" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.WritableComparable"/> |
| <param name="value" type="org.apache.hadoop.io.Writable"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="putAll" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="t" type="java.util.Map<? extends org.apache.hadoop.io.WritableComparable, ? extends org.apache.hadoop.io.Writable>"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="remove" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="size" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="values" return="java.util.Collection<org.apache.hadoop.io.Writable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A Writable SortedMap.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.SortedMapWritable --> |
| <!-- start interface org.apache.hadoop.io.Stringifier --> |
| <interface name="Stringifier" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.io.Closeable"/> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="obj" type="T"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Converts the object to a string representation |
| @param obj the object to convert |
| @return the string representation of the object |
| @throws IOException if the object cannot be converted]]> |
| </doc> |
| </method> |
| <method name="fromString" return="T" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Restores the object from its string representation. |
| @param str the string representation of the object |
| @return restored object |
| @throws IOException if the object cannot be restored]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Closes this object. |
| @throws IOException if an I/O error occurs]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Stringifier interface offers two methods to convert an object |
| to a string representation and restore the object given its |
| string representation. |
| @param <T> the class of the objects to stringify]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.Stringifier --> |
| <!-- start class org.apache.hadoop.io.Text --> |
| <class name="Text" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <constructor name="Text" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="Text" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct from a string.]]> |
| </doc> |
| </constructor> |
| <constructor name="Text" type="org.apache.hadoop.io.Text" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct from another text.]]> |
| </doc> |
| </constructor> |
| <constructor name="Text" type="byte[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct from a byte array.]]> |
| </doc> |
| </constructor> |
| <method name="getBytes" return="byte[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the raw bytes.]]> |
| </doc> |
| </method> |
| <method name="getLength" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the number of bytes in the byte array]]> |
| </doc> |
| </method> |
| <method name="charAt" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="position" type="int"/> |
| <doc> |
| <![CDATA[Returns the Unicode Scalar Value (32-bit integer value) |
| for the character at <code>position</code>. Note that this |
| method avoids using the converter or doing String instantiation. |
| @return the Unicode scalar value at position or -1 |
| if the position is invalid or points to a |
| trailing byte]]> |
| </doc> |
| </method> |
| <method name="find" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="what" type="java.lang.String"/> |
| </method> |
| <method name="find" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="what" type="java.lang.String"/> |
| <param name="start" type="int"/> |
| <doc> |
| <![CDATA[Finds any occurrence of <code>what</code> in the backing |
| buffer, starting at position <code>start</code>. The starting |
| position is measured in bytes and the return value is in |
| terms of byte position in the buffer. The backing buffer is |
| not converted to a string for this operation. |
| @return byte position of the first occurrence of the search |
| string in the UTF-8 buffer or -1 if not found]]> |
| </doc> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="string" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set to contain the contents of a string.]]> |
| </doc> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="utf8" type="byte[]"/> |
| <doc> |
| <![CDATA[Set to a utf8 byte array]]> |
| </doc> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="org.apache.hadoop.io.Text"/> |
| <doc> |
| <![CDATA[copy a text.]]> |
| </doc> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="utf8" type="byte[]"/> |
| <param name="start" type="int"/> |
| <param name="len" type="int"/> |
| <doc> |
| <![CDATA[Set the Text to range of bytes |
| @param utf8 the data to copy from |
| @param start the first position of the new string |
| @param len the number of bytes of the new string]]> |
| </doc> |
| </method> |
| <method name="append" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="utf8" type="byte[]"/> |
| <param name="start" type="int"/> |
| <param name="len" type="int"/> |
| <doc> |
| <![CDATA[Append a range of bytes to the end of the given text |
| @param utf8 the data to copy from |
| @param start the first position to append from utf8 |
| @param len the number of bytes to append]]> |
| </doc> |
| </method> |
| <method name="clear" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Clear the string to empty.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Convert text back to string |
| @see java.lang.Object#toString()]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[deserialize]]> |
| </doc> |
| </method> |
| <method name="skip" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Skips over one Text in the input.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[serialize |
| write this object to out |
| length uses zero-compressed encoding |
| @see Writable#write(DataOutput)]]> |
| </doc> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Compare two Texts bytewise using standard UTF8 ordering.]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Returns true iff <code>o</code> is a Text with the same contents.]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[hash function]]> |
| </doc> |
| </method> |
| <method name="decode" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="utf8" type="byte[]"/> |
| <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/> |
| <doc> |
| <![CDATA[Converts the provided byte array to a String using the |
| UTF-8 encoding. If the input is malformed, |
| replace by a default value.]]> |
| </doc> |
| </method> |
| <method name="decode" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="utf8" type="byte[]"/> |
| <param name="start" type="int"/> |
| <param name="length" type="int"/> |
| <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/> |
| </method> |
| <method name="decode" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="utf8" type="byte[]"/> |
| <param name="start" type="int"/> |
| <param name="length" type="int"/> |
| <param name="replace" type="boolean"/> |
| <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/> |
| <doc> |
| <![CDATA[Converts the provided byte array to a String using the |
| UTF-8 encoding. If <code>replace</code> is true, then |
| malformed input is replaced with the |
| substitution character, which is U+FFFD. Otherwise the |
| method throws a MalformedInputException.]]> |
| </doc> |
| </method> |
| <method name="encode" return="java.nio.ByteBuffer" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="string" type="java.lang.String"/> |
| <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/> |
| <doc> |
| <![CDATA[Converts the provided String to bytes using the |
| UTF-8 encoding. If the input is malformed, |
| invalid chars are replaced by a default value. |
| @return ByteBuffer: bytes stores at ByteBuffer.array() |
| and length is ByteBuffer.limit()]]> |
| </doc> |
| </method> |
| <method name="encode" return="java.nio.ByteBuffer" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="string" type="java.lang.String"/> |
| <param name="replace" type="boolean"/> |
| <exception name="CharacterCodingException" type="java.nio.charset.CharacterCodingException"/> |
| <doc> |
| <![CDATA[Converts the provided String to bytes using the |
| UTF-8 encoding. If <code>replace</code> is true, then |
| malformed input is replaced with the |
| substitution character, which is U+FFFD. Otherwise the |
| method throws a MalformedInputException. |
| @return ByteBuffer: bytes stores at ByteBuffer.array() |
| and length is ByteBuffer.limit()]]> |
| </doc> |
| </method> |
| <method name="readString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a UTF8 encoded string from in]]> |
| </doc> |
| </method> |
| <method name="writeString" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <param name="s" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write a UTF8 encoded string to out]]> |
| </doc> |
| </method> |
| <method name="validateUTF8" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="utf8" type="byte[]"/> |
| <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/> |
| <doc> |
| <![CDATA[Check if a byte array contains valid utf-8 |
| @param utf8 byte array |
| @throws MalformedInputException if the byte array contains invalid utf-8]]> |
| </doc> |
| </method> |
| <method name="validateUTF8" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="utf8" type="byte[]"/> |
| <param name="start" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="MalformedInputException" type="java.nio.charset.MalformedInputException"/> |
| <doc> |
| <![CDATA[Check to see if a byte array is valid utf-8 |
| @param utf8 the array of bytes |
| @param start the offset of the first byte in the array |
| @param len the length of the byte sequence |
| @throws MalformedInputException if the byte array contains invalid bytes]]> |
| </doc> |
| </method> |
| <method name="bytesToCodePoint" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="java.nio.ByteBuffer"/> |
| <doc> |
| <![CDATA[Returns the next code point at the current position in |
| the buffer. The buffer's position will be incremented. |
| Any mark set on this buffer will be changed by this method!]]> |
| </doc> |
| </method> |
| <method name="utf8Length" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="string" type="java.lang.String"/> |
| <doc> |
| <![CDATA[For the given string, returns the number of UTF-8 bytes |
| required to encode the string. |
| @param string text to encode |
| @return number of UTF-8 bytes required to encode]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class stores text using standard UTF8 encoding. It provides methods |
| to serialize, deserialize, and compare texts at byte level. The type of |
| length is integer and is serialized using zero-compressed format. <p>In |
| addition, it provides methods for string traversal without converting the |
| byte array to a string. <p>Also includes utilities for |
| serializing/deserializing a string, coding/decoding a string, checking if a |
| byte array contains valid UTF8 code, calculating the length of an encoded |
| string.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.Text --> |
| <!-- start class org.apache.hadoop.io.Text.Comparator --> |
| <class name="Text.Comparator" extends="org.apache.hadoop.io.WritableComparator" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Text.Comparator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| </method> |
| <doc> |
| <![CDATA[A WritableComparator optimized for Text keys.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.Text.Comparator --> |
| <!-- start class org.apache.hadoop.io.TwoDArrayWritable --> |
| <class name="TwoDArrayWritable" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="TwoDArrayWritable" type="java.lang.Class" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="TwoDArrayWritable" type="java.lang.Class, org.apache.hadoop.io.Writable[][]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="toArray" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="values" type="org.apache.hadoop.io.Writable[][]"/> |
| </method> |
| <method name="get" return="org.apache.hadoop.io.Writable[][]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A Writable for 2D arrays containing a matrix of instances of a class.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.TwoDArrayWritable --> |
| <!-- start class org.apache.hadoop.io.UTF8 --> |
| <class name="UTF8" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="replaced by Text"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <constructor name="UTF8" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="UTF8" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct from a given string.]]> |
| </doc> |
| </constructor> |
| <constructor name="UTF8" type="org.apache.hadoop.io.UTF8" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct from another UTF8 instance (copy constructor).]]> |
| </doc> |
| </constructor> |
| <method name="getBytes" return="byte[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The raw bytes.]]> |
| </doc> |
| </method> |
| <method name="getLength" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The number of bytes in the encoded string.]]> |
| </doc> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="string" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set to contain the contents of a string.]]> |
| </doc> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="org.apache.hadoop.io.UTF8"/> |
| <doc> |
| <![CDATA[Set to contain the contents of a string.]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="skip" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Skips over one UTF8 in the input.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Compare two UTF8s.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Convert to a String.]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Returns true iff <code>o</code> is a UTF8 with the same contents.]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getBytes" return="byte[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="string" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Convert a string to a UTF-8 encoded byte array. |
| @see String#getBytes(String)]]> |
| </doc> |
| </method> |
| <method name="readString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a UTF-8 encoded string. |
| |
| @see DataInput#readUTF()]]> |
| </doc> |
| </method> |
| <method name="writeString" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <param name="s" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write a UTF-8 encoded string. |
| |
| @see DataOutput#writeUTF(String)]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A WritableComparable for strings that uses the UTF8 encoding. |
| |
| <p>Also includes utilities for efficiently reading and writing UTF-8. |
| |
| @deprecated replaced by Text]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.UTF8 --> |
| <!-- start class org.apache.hadoop.io.UTF8.Comparator --> |
| <class name="UTF8.Comparator" extends="org.apache.hadoop.io.WritableComparator" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="UTF8.Comparator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| </method> |
| <doc> |
| <![CDATA[A WritableComparator optimized for UTF8 keys.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.UTF8.Comparator --> |
| <!-- start class org.apache.hadoop.io.VersionedWritable --> |
| <class name="VersionedWritable" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="VersionedWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getVersion" return="byte" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the version number of the current implementation.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A base class for Writables that provides version checking. |
| |
| <p>This is useful when a class may evolve, so that instances written by the |
| old version of the class may still be processed by the new version. To |
| handle this situation, {@link #readFields(DataInput)} |
| implementations should catch {@link VersionMismatchException}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.VersionedWritable --> |
| <!-- start class org.apache.hadoop.io.VersionMismatchException --> |
| <class name="VersionMismatchException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="VersionMismatchException" type="byte, byte" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns a string representation of this object.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Thrown by {@link VersionedWritable#readFields(DataInput)} when the |
| version of an object being read does not match the current implementation |
| version as returned by {@link VersionedWritable#getVersion()}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.VersionMismatchException --> |
| <!-- start class org.apache.hadoop.io.VIntWritable --> |
| <class name="VIntWritable" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <constructor name="VIntWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="VIntWritable" type="int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="int"/> |
| <doc> |
| <![CDATA[Set the value of this VIntWritable.]]> |
| </doc> |
| </method> |
| <method name="get" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the value of this VIntWritable.]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Returns true iff <code>o</code> is a VIntWritable with the same value.]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Compares two VIntWritables.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A WritableComparable for integer values stored in variable-length format. |
| Such values take between one and five bytes. Smaller values take fewer bytes. |
| |
| @see org.apache.hadoop.io.WritableUtils#readVInt(DataInput)]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.VIntWritable --> |
| <!-- start class org.apache.hadoop.io.VLongWritable --> |
| <class name="VLongWritable" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <constructor name="VLongWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="VLongWritable" type="long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="long"/> |
| <doc> |
| <![CDATA[Set the value of this VLongWritable.]]> |
| </doc> |
| </method> |
| <method name="get" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the value of this VLongWritable.]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Returns true iff <code>o</code> is a VLongWritable with the same value.]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Compares two VLongWritables.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A WritableComparable for longs in a variable-length format. Such values take |
| between one and nine bytes. Smaller values take fewer bytes. |
| |
| @see org.apache.hadoop.io.WritableUtils#readVLong(DataInput)]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.VLongWritable --> |
| <!-- start interface org.apache.hadoop.io.Writable --> |
| <interface name="Writable" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Serialize the fields of this object to <code>out</code>. |
| |
| @param out <code>DataOutput</code> to serialize this object into. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Deserialize the fields of this object from <code>in</code>. |
| |
| <p>For efficiency, implementations should attempt to re-use storage in the |
| existing object where possible.</p> |
| |
| @param in <code>DataInput</code> to deserialize this object from. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A serializable object which implements a simple, efficient, serialization |
| protocol, based on {@link DataInput} and {@link DataOutput}. |
| |
| <p>Any <code>key</code> or <code>value</code> type in the Hadoop Map-Reduce |
| framework implements this interface.</p> |
| |
| <p>Implementations typically implement a static <code>read(DataInput)</code> |
| method which constructs a new instance, calls {@link #readFields(DataInput)} |
| and returns the instance.</p> |
| |
| <p>Example:</p> |
| <p><blockquote><pre> |
| public class MyWritable implements Writable { |
| // Some data |
| private int counter; |
| private long timestamp; |
| |
| public void write(DataOutput out) throws IOException { |
| out.writeInt(counter); |
| out.writeLong(timestamp); |
| } |
| |
| public void readFields(DataInput in) throws IOException { |
| counter = in.readInt(); |
| timestamp = in.readLong(); |
| } |
| |
| public static MyWritable read(DataInput in) throws IOException { |
| MyWritable w = new MyWritable(); |
| w.readFields(in); |
| return w; |
| } |
| } |
| </pre></blockquote></p>]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.Writable --> |
| <!-- start interface org.apache.hadoop.io.WritableComparable --> |
| <interface name="WritableComparable" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <implements name="java.lang.Comparable"/> |
| <doc> |
| <![CDATA[A {@link Writable} which is also {@link Comparable}. |
| |
| <p><code>WritableComparable</code>s can be compared to each other, typically |
| via <code>Comparator</code>s. Any type which is to be used as a |
| <code>key</code> in the Hadoop Map-Reduce framework should implement this |
| interface.</p> |
| |
| <p>Example:</p> |
| <p><blockquote><pre> |
| public class MyWritableComparable implements WritableComparable { |
| // Some data |
| private int counter; |
| private long timestamp; |
| |
| public void write(DataOutput out) throws IOException { |
| out.writeInt(counter); |
| out.writeLong(timestamp); |
| } |
| |
| public void readFields(DataInput in) throws IOException { |
| counter = in.readInt(); |
| timestamp = in.readLong(); |
| } |
| |
| public int compareTo(MyWritableComparable w) { |
| int thisValue = this.counter; |
| int thatValue = w.counter; |
| return (thisValue < thatValue ? -1 : (thisValue==thatValue ? 0 : 1)); |
| } |
| } |
| </pre></blockquote></p>]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.WritableComparable --> |
| <!-- start class org.apache.hadoop.io.WritableComparator --> |
| <class name="WritableComparator" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.RawComparator"/> |
| <constructor name="WritableComparator" type="java.lang.Class" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct for a {@link WritableComparable} implementation.]]> |
| </doc> |
| </constructor> |
| <method name="get" return="org.apache.hadoop.io.WritableComparator" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class"/> |
| <doc> |
| <![CDATA[Get a comparator for a {@link WritableComparable} implementation.]]> |
| </doc> |
| </method> |
| <method name="define" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class"/> |
| <param name="comparator" type="org.apache.hadoop.io.WritableComparator"/> |
| <doc> |
| <![CDATA[Register an optimized comparator for a {@link WritableComparable} |
| implementation.]]> |
| </doc> |
| </method> |
| <method name="getKeyClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the WritableComparable implementation class.]]> |
| </doc> |
| </method> |
| <method name="newKey" return="org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a new {@link WritableComparable} instance.]]> |
| </doc> |
| </method> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| <doc> |
| <![CDATA[Optimization hook.  Override this to make SequenceFile.Sorters scream. |
| |
| <p>The default implementation reads the data into two {@link |
| WritableComparable}s (using {@link |
| Writable#readFields(DataInput)}), then calls {@link |
| #compare(WritableComparable,WritableComparable)}.]]> |
| </doc> |
| </method> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="a" type="org.apache.hadoop.io.WritableComparable"/> |
| <param name="b" type="org.apache.hadoop.io.WritableComparable"/> |
| <doc> |
| <![CDATA[Compare two WritableComparables. |
| |
| <p> The default implementation uses the natural ordering, calling {@link |
| Comparable#compareTo(Object)}.]]> |
| </doc> |
| </method> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="a" type="java.lang.Object"/> |
| <param name="b" type="java.lang.Object"/> |
| </method> |
| <method name="compareBytes" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| <doc> |
| <![CDATA[Lexicographic order of binary data.]]> |
| </doc> |
| </method> |
| <method name="hashBytes" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="length" type="int"/> |
| <doc> |
| <![CDATA[Compute hash for binary data.]]> |
| </doc> |
| </method> |
| <method name="readUnsignedShort" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="start" type="int"/> |
| <doc> |
| <![CDATA[Parse an unsigned short from a byte array.]]> |
| </doc> |
| </method> |
| <method name="readInt" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="start" type="int"/> |
| <doc> |
| <![CDATA[Parse an integer from a byte array.]]> |
| </doc> |
| </method> |
| <method name="readFloat" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="start" type="int"/> |
| <doc> |
| <![CDATA[Parse a float from a byte array.]]> |
| </doc> |
| </method> |
| <method name="readLong" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="start" type="int"/> |
| <doc> |
| <![CDATA[Parse a long from a byte array.]]> |
| </doc> |
| </method> |
| <method name="readDouble" return="double" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="start" type="int"/> |
| <doc> |
| <![CDATA[Parse a double from a byte array.]]> |
| </doc> |
| </method> |
| <method name="readVLong" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="start" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it. |
| @param bytes byte array with the encoded long |
| @param start starting index |
| @throws java.io.IOException |
| @return deserialized long]]> |
| </doc> |
| </method> |
| <method name="readVInt" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="start" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it. |
| @param bytes byte array with the encoded integer |
| @param start start index |
| @throws java.io.IOException |
| @return deserialized integer]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A Comparator for {@link WritableComparable}s. |
| |
| <p>This base implementation uses the natural ordering.  To define alternate |
| orderings, override {@link #compare(WritableComparable,WritableComparable)}. |
| |
| <p>One may optimize compare-intensive operations by overriding |
| {@link #compare(byte[],int,int,byte[],int,int)}. Static utility methods are |
| provided to assist in optimized implementations of this method.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.WritableComparator --> |
| <!-- start class org.apache.hadoop.io.WritableFactories --> |
| <class name="WritableFactories" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="setFactory" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class"/> |
| <param name="factory" type="org.apache.hadoop.io.WritableFactory"/> |
| <doc> |
| <![CDATA[Define a factory for a class.]]> |
| </doc> |
| </method> |
| <method name="getFactory" return="org.apache.hadoop.io.WritableFactory" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class"/> |
| <doc> |
| <![CDATA[Define a factory for a class.]]> |
| </doc> |
| </method> |
| <method name="newInstance" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Create a new instance of a class with a defined factory.]]> |
| </doc> |
| </method> |
| <method name="newInstance" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class"/> |
| <doc> |
| <![CDATA[Create a new instance of a class with a defined factory.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Factories for non-public writables. Defining a factory permits {@link |
| ObjectWritable} to be able to construct instances of non-public classes.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.WritableFactories --> |
| <!-- start interface org.apache.hadoop.io.WritableFactory --> |
| <interface name="WritableFactory" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="newInstance" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return a new instance.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A factory for a class of Writable. |
| @see WritableFactories]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.WritableFactory --> |
| <!-- start class org.apache.hadoop.io.WritableName --> |
| <class name="WritableName" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="setName" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="writableClass" type="java.lang.Class"/> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the name that a class should be known as to something other than the |
| class name.]]> |
| </doc> |
| </method> |
| <method name="addName" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="writableClass" type="java.lang.Class"/> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Add an alternate name for a class.]]> |
| </doc> |
| </method> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="writableClass" type="java.lang.Class"/> |
| <doc> |
| <![CDATA[Return the name for a class. Default is {@link Class#getName()}.]]> |
| </doc> |
| </method> |
| <method name="getClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the class for a name. Default is {@link Class#forName(String)}.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Utility to permit renaming of Writable implementation classes without |
| invalidating files that contain their class name.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.WritableName --> |
| <!-- start class org.apache.hadoop.io.WritableUtils --> |
| <class name="WritableUtils" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="WritableUtils" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="readCompressedByteArray" return="byte[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="skipCompressedByteArray" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeCompressedByteArray" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <param name="bytes" type="byte[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readCompressedString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeCompressedString" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <param name="s" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeString" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <param name="s" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeStringArray" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <param name="s" type="java.lang.String[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeCompressedStringArray" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <param name="s" type="java.lang.String[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readStringArray" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readCompressedStringArray" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="displayByteArray" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="record" type="byte[]"/> |
| </method> |
| <method name="clone" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="orig" type="org.apache.hadoop.io.Writable"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Make a copy of a writable object using serialization to a buffer. |
| @param orig The object to copy |
| @return The copied object]]> |
| </doc> |
| </method> |
| <method name="cloneInto" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dst" type="org.apache.hadoop.io.Writable"/> |
| <param name="src" type="org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Make a copy of the writable object using serialization to a buffer |
| @param dst the object to copy into, which is destroyed |
| @param src the object to copy from |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="writeVInt" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="java.io.DataOutput"/> |
| <param name="i" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Serializes an integer to a binary stream with zero-compressed encoding. |
| For -120 <= i <= 127, only one byte is used with the actual value. |
| For other values of i, the first byte value indicates whether the |
| integer is positive or negative, and the number of bytes that follow. |
| If the first byte value v is between -121 and -124, the following integer |
| is positive, with number of bytes that follow are -(v+120). |
| If the first byte value v is between -125 and -128, the following integer |
| is negative, with number of bytes that follow are -(v+124). Bytes are |
| stored in the high-non-zero-byte-first order. |
| |
| @param stream Binary output stream |
| @param i Integer to be serialized |
| @throws java.io.IOException]]> |
| </doc> |
| </method> |
| <method name="writeVLong" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="java.io.DataOutput"/> |
| <param name="i" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Serializes a long to a binary stream with zero-compressed encoding. |
| For -112 <= i <= 127, only one byte is used with the actual value. |
| For other values of i, the first byte value indicates whether the |
| long is positive or negative, and the number of bytes that follow. |
| If the first byte value v is between -113 and -120, the following long |
| is positive, with number of bytes that follow are -(v+112). |
| If the first byte value v is between -121 and -128, the following long |
| is negative, with number of bytes that follow are -(v+120). Bytes are |
| stored in the high-non-zero-byte-first order. |
| |
| @param stream Binary output stream |
| @param i Long to be serialized |
| @throws java.io.IOException]]> |
| </doc> |
| </method> |
| <method name="readVLong" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reads a zero-compressed encoded long from input stream and returns it. |
| @param stream Binary input stream |
| @throws java.io.IOException |
| @return deserialized long from stream.]]> |
| </doc> |
| </method> |
| <method name="readVInt" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reads a zero-compressed encoded integer from input stream and returns it. |
| @param stream Binary input stream |
| @throws java.io.IOException |
| @return deserialized integer from stream.]]> |
| </doc> |
| </method> |
| <method name="isNegativeVInt" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="byte"/> |
| <doc> |
| <![CDATA[Given the first byte of a vint/vlong, determine the sign |
| @param value the first byte |
| @return is the value negative]]> |
| </doc> |
| </method> |
| <method name="decodeVIntSize" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="byte"/> |
| <doc> |
| <![CDATA[Parse the first byte of a vint/vlong to determine the number of bytes |
| @param value the first byte of the vint/vlong |
| @return the total number of bytes (1 to 9)]]> |
| </doc> |
| </method> |
| <method name="getVIntSize" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="long"/> |
| <doc> |
| <![CDATA[Get the encoded length if an integer is stored in a variable-length format |
| @return the encoded length]]> |
| </doc> |
| </method> |
| <method name="readEnum" return="T extends java.lang.Enum<T>" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <param name="enumType" type="java.lang.Class<T>"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read an Enum value from DataInput, Enums are read and written |
| using String values. |
| @param <T> Enum type |
| @param in DataInput to read from |
| @param enumType Class type of Enum |
| @return Enum represented by String read from DataInput |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="writeEnum" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <param name="enumVal" type="java.lang.Enum"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[writes String value of enum to DataOutput. |
| @param out DataOutput stream |
| @param enumVal enum value |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="skipFully" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Skip <i>len</i> number of bytes in input stream <i>in</i> |
| @param in input stream |
| @param len number of bytes to skip |
| @throws IOException when skipped less number of bytes]]> |
| </doc> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.io.WritableUtils --> |
| <doc> |
| <![CDATA[Generic i/o code for use when reading and writing data to the network, |
| to databases, and to files.]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.io.compress"> |
| <!-- start interface org.apache.hadoop.io.compress.CompressionCodec --> |
| <interface name="CompressionCodec" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.OutputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a {@link CompressionOutputStream} that will write to the given |
| {@link OutputStream}. |
| |
| @param out the location for the final output stream |
| @return a stream the user can write uncompressed data to have it compressed |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.OutputStream"/> |
| <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a {@link CompressionOutputStream} that will write to the given |
| {@link OutputStream} with the given {@link Compressor}. |
| |
| @param out the location for the final output stream |
| @param compressor compressor to use |
| @return a stream the user can write uncompressed data to have it compressed |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getCompressorType" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the type of {@link Compressor} needed by this {@link CompressionCodec}. |
| |
| @return the type of compressor needed by this codec.]]> |
| </doc> |
| </method> |
| <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a new {@link Compressor} for use by this {@link CompressionCodec}. |
| |
| @return a new compressor for use by this codec]]> |
| </doc> |
| </method> |
| <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a stream decompressor that will read from the given input stream. |
| |
| @param in the stream to read compressed bytes from |
| @return a stream to read uncompressed bytes from |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a {@link CompressionInputStream} that will read from the given |
| {@link InputStream} with the given {@link Decompressor}. |
| |
| @param in the stream to read compressed bytes from |
| @param decompressor decompressor to use |
| @return a stream to read uncompressed bytes from |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getDecompressorType" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the type of {@link Decompressor} needed by this {@link CompressionCodec}. |
| |
| @return the type of decompressor needed by this codec.]]> |
| </doc> |
| </method> |
| <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a new {@link Decompressor} for use by this {@link CompressionCodec}. |
| |
| @return a new decompressor for use by this codec]]> |
| </doc> |
| </method> |
| <method name="getDefaultExtension" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the default filename extension for this kind of compression. |
| @return the extension including the '.']]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class encapsulates a streaming compression/decompression pair.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.compress.CompressionCodec --> |
| <!-- start class org.apache.hadoop.io.compress.CompressionCodecFactory --> |
| <class name="CompressionCodecFactory" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="CompressionCodecFactory" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Find the codecs specified in the config value io.compression.codecs |
| and register them. Defaults to gzip and zip.]]> |
| </doc> |
| </constructor> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Print the extension map out as a string.]]> |
| </doc> |
| </method> |
| <method name="getCodecClasses" return="java.util.List<java.lang.Class>" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Get the list of codecs listed in the configuration |
| @param conf the configuration to look in |
| @return a list of the Configuration classes or null if the attribute |
| was not set]]> |
| </doc> |
| </method> |
| <method name="setCodecClasses" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="classes" type="java.util.List<java.lang.Class>"/> |
| <doc> |
| <![CDATA[Sets a list of codec classes in the configuration. |
| @param conf the configuration to modify |
| @param classes the list of classes to set]]> |
| </doc> |
| </method> |
| <method name="getCodec" return="org.apache.hadoop.io.compress.CompressionCodec" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="file" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Find the relevant compression codec for the given file based on its |
| filename suffix. |
| @param file the filename to check |
| @return the codec object]]> |
| </doc> |
| </method> |
| <method name="removeSuffix" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="filename" type="java.lang.String"/> |
| <param name="suffix" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Removes a suffix from a filename, if it has it. |
| @param filename the filename to strip |
| @param suffix the suffix to remove |
| @return the shortened filename]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[A little test program. |
| @param args]]> |
| </doc> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[A factory that will find the correct codec for a given filename.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.CompressionCodecFactory --> |
| <!-- start class org.apache.hadoop.io.compress.CompressionInputStream --> |
| <class name="CompressionInputStream" extends="java.io.InputStream" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="CompressionInputStream" type="java.io.InputStream" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a compression input stream that reads |
| the decompressed bytes from the given stream. |
| |
| @param in The input stream to be decompressed.]]> |
| </doc> |
| </constructor> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="read" return="int" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read bytes from the stream. |
| Made abstract to prevent leakage to underlying stream.]]> |
| </doc> |
| </method> |
| <method name="resetState" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reset the decompressor to its initial state and discard any buffered data, |
| as the underlying stream may have been repositioned.]]> |
| </doc> |
| </method> |
| <field name="in" type="java.io.InputStream" |
| transient="false" volatile="false" |
| static="false" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The input stream to be compressed.]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[A compression input stream. |
| |
| <p>Implementations are assumed to be buffered. This permits clients to |
| reposition the underlying input stream then call {@link #resetState()}, |
| without having to also synchronize client buffers.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.CompressionInputStream --> |
| <!-- start class org.apache.hadoop.io.compress.CompressionOutputStream --> |
| <class name="CompressionOutputStream" extends="java.io.OutputStream" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="CompressionOutputStream" type="java.io.OutputStream" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a compression output stream that writes |
| the compressed bytes to the given stream. |
| @param out]]> |
| </doc> |
| </constructor> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="flush" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write compressed bytes to the stream. |
| Made abstract to prevent leakage to underlying stream.]]> |
| </doc> |
| </method> |
| <method name="finish" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Finishes writing compressed data to the output stream |
| without closing the underlying stream.]]> |
| </doc> |
| </method> |
| <method name="resetState" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reset the compression to the initial state. |
| Does not reset the underlying stream.]]> |
| </doc> |
| </method> |
| <field name="out" type="java.io.OutputStream" |
| transient="false" volatile="false" |
| static="false" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The output stream to be compressed.]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[A compression output stream.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.CompressionOutputStream --> |
| <!-- start interface org.apache.hadoop.io.compress.Compressor --> |
| <interface name="Compressor" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="setInput" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <doc> |
| <![CDATA[Sets input data for compression. |
| This should be called whenever #needsInput() returns |
| <code>true</code> indicating that more input data is required. |
| |
| @param b Input data |
| @param off Start offset |
| @param len Length]]> |
| </doc> |
| </method> |
| <method name="needsInput" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns true if the input data buffer is empty and |
| #setInput() should be called to provide more input. |
| |
| @return <code>true</code> if the input data buffer is empty and |
| #setInput() should be called in order to provide more input.]]> |
| </doc> |
| </method> |
| <method name="setDictionary" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <doc> |
| <![CDATA[Sets preset dictionary for compression. A preset dictionary |
| is used when the history buffer can be predetermined. |
| |
| @param b Dictionary data bytes |
| @param off Start offset |
| @param len Length]]> |
| </doc> |
| </method> |
| <method name="getBytesRead" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return number of uncompressed bytes input so far.]]> |
| </doc> |
| </method> |
| <method name="getBytesWritten" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return number of compressed bytes output so far.]]> |
| </doc> |
| </method> |
| <method name="finish" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[When called, indicates that compression should end |
| with the current contents of the input buffer.]]> |
| </doc> |
| </method> |
| <method name="finished" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns true if the end of the compressed |
| data output stream has been reached. |
| @return <code>true</code> if the end of the compressed |
| data output stream has been reached.]]> |
| </doc> |
| </method> |
| <method name="compress" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Fills specified buffer with compressed data. Returns actual number |
| of bytes of compressed data. A return value of 0 indicates that |
| needsInput() should be called in order to determine if more input |
| data is required. |
| |
| @param b Buffer for the compressed data |
| @param off Start offset of the data |
| @param len Size of the buffer |
| @return The actual number of bytes of compressed data.]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Resets compressor so that a new set of input data can be processed.]]> |
| </doc> |
| </method> |
| <method name="end" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Closes the compressor and discards any unprocessed input.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Specification of a stream-based 'compressor' which can be |
| plugged into a {@link CompressionOutputStream} to compress data. |
| This is modelled after {@link java.util.zip.Deflater}]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.compress.Compressor --> |
| <!-- start interface org.apache.hadoop.io.compress.Decompressor --> |
| <interface name="Decompressor" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="setInput" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <doc> |
| <![CDATA[Sets input data for decompression. |
| This should be called whenever #needsInput() returns |
| <code>true</code> indicating that more input data is required. |
| |
| @param b Input data |
| @param off Start offset |
| @param len Length]]> |
| </doc> |
| </method> |
| <method name="needsInput" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns true if the input data buffer is empty and |
| #setInput() should be called to provide more input. |
| |
| @return <code>true</code> if the input data buffer is empty and |
| #setInput() should be called in order to provide more input.]]> |
| </doc> |
| </method> |
| <method name="setDictionary" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <doc> |
| <![CDATA[Sets preset dictionary for compression. A preset dictionary |
| is used when the history buffer can be predetermined. |
| |
| @param b Dictionary data bytes |
| @param off Start offset |
| @param len Length]]> |
| </doc> |
| </method> |
| <method name="needsDictionary" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns <code>true</code> if a preset dictionary is needed for decompression. |
| @return <code>true</code> if a preset dictionary is needed for decompression]]> |
| </doc> |
| </method> |
| <method name="finished" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns true if the end of the compressed |
| data output stream has been reached. |
| @return <code>true</code> if the end of the compressed |
| data output stream has been reached.]]> |
| </doc> |
| </method> |
| <method name="decompress" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Fills specified buffer with uncompressed data. Returns actual number |
| of bytes of uncompressed data. A return value of 0 indicates that |
| #needsInput() should be called in order to determine if more input |
| data is required. |
| |
| @param b Buffer for the uncompressed data |
| @param off Start offset of the data |
| @param len Size of the buffer |
| @return The actual number of bytes of uncompressed data. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Resets decompressor so that a new set of input data can be processed.]]> |
| </doc> |
| </method> |
| <method name="end" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Closes the decompressor and discards any unprocessed input.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Specification of a stream-based 'de-compressor' which can be |
| plugged into a {@link CompressionInputStream} to de-compress data. |
| This is modelled after {@link java.util.zip.Inflater}]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.compress.Decompressor --> |
| <!-- start class org.apache.hadoop.io.compress.DefaultCodec --> |
| <class name="DefaultCodec" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.conf.Configurable"/> |
| <implements name="org.apache.hadoop.io.compress.CompressionCodec"/> |
| <constructor name="DefaultCodec" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.OutputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.OutputStream"/> |
| <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getCompressorType" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getDecompressorType" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getDefaultExtension" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.DefaultCodec --> |
| <!-- start class org.apache.hadoop.io.compress.GzipCodec --> |
| <class name="GzipCodec" extends="org.apache.hadoop.io.compress.DefaultCodec" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="GzipCodec" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.OutputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.OutputStream"/> |
| <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getCompressorType" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getDecompressorType" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getDefaultExtension" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[This class creates gzip compressors/decompressors.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.GzipCodec --> |
| <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream --> |
| <class name="GzipCodec.GzipInputStream" extends="org.apache.hadoop.io.compress.DecompressorStream" |
| abstract="false" |
| static="true" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <constructor name="GzipCodec.GzipInputStream" type="java.io.InputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <constructor name="GzipCodec.GzipInputStream" type="org.apache.hadoop.io.compress.DecompressorStream" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Allow subclasses to directly set the inflater stream.]]> |
| </doc> |
| </constructor> |
| <method name="available" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="data" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="skip" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="offset" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="resetState" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipInputStream --> |
| <!-- start class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream --> |
| <class name="GzipCodec.GzipOutputStream" extends="org.apache.hadoop.io.compress.CompressorStream" |
| abstract="false" |
| static="true" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <constructor name="GzipCodec.GzipOutputStream" type="java.io.OutputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <constructor name="GzipCodec.GzipOutputStream" type="org.apache.hadoop.io.compress.CompressorStream" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Allow children types to put a different type in here. |
| @param out the Deflater stream to use]]> |
| </doc> |
| </constructor> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="flush" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="data" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="finish" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="resetState" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A bridge that wraps around a DeflaterOutputStream to make it |
| a CompressionOutputStream.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.GzipCodec.GzipOutputStream --> |
| <!-- start class org.apache.hadoop.io.compress.LzoCodec --> |
| <class name="LzoCodec" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.conf.Configurable"/> |
| <implements name="org.apache.hadoop.io.compress.CompressionCodec"/> |
| <constructor name="LzoCodec" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="isNativeLzoLoaded" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Check if native-lzo library is loaded & initialized. |
| |
| @param conf configuration |
| @return <code>true</code> if native-lzo library is loaded & initialized; |
| else <code>false</code>]]> |
| </doc> |
| </method> |
| <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.OutputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createOutputStream" return="org.apache.hadoop.io.compress.CompressionOutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.OutputStream"/> |
| <param name="compressor" type="org.apache.hadoop.io.compress.Compressor"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getCompressorType" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createCompressor" return="org.apache.hadoop.io.compress.Compressor" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createInputStream" return="org.apache.hadoop.io.compress.CompressionInputStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <param name="decompressor" type="org.apache.hadoop.io.compress.Decompressor"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getDecompressorType" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createDecompressor" return="org.apache.hadoop.io.compress.Decompressor" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getDefaultExtension" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the default filename extension for this kind of compression. |
| @return the extension including the '.']]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A {@link org.apache.hadoop.io.compress.CompressionCodec} for a streaming |
| <b>lzo</b> compression/decompression pair. |
| http://www.oberhumer.com/opensource/lzo/]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.LzoCodec --> |
| </package> |
| <package name="org.apache.hadoop.io.compress.lzo"> |
| <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor --> |
| <class name="LzoCompressor" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.compress.Compressor"/> |
| <constructor name="LzoCompressor" type="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new compressor using the specified {@link CompressionStrategy}. |
| |
| @param strategy lzo compression algorithm to use |
| @param directBufferSize size of the direct buffer to be used.]]> |
| </doc> |
| </constructor> |
| <constructor name="LzoCompressor" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new compressor with the default lzo1x_1 compression.]]> |
| </doc> |
| </constructor> |
| <method name="isNativeLzoLoaded" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Check if lzo compressors are loaded and initialized. |
| |
| @return <code>true</code> if lzo compressors are loaded & initialized, |
| else <code>false</code>]]> |
| </doc> |
| </method> |
| <method name="setInput" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| </method> |
| <method name="setDictionary" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| </method> |
| <method name="needsInput" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="finish" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="finished" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compress" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getBytesRead" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return number of bytes given to this compressor since last reset.]]> |
| </doc> |
| </method> |
| <method name="getBytesWritten" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return number of bytes consumed by callers of compress since last reset.]]> |
| </doc> |
| </method> |
| <method name="end" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Noop.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A {@link Compressor} based on the lzo algorithm. |
| http://www.oberhumer.com/opensource/lzo/]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor --> |
| <!-- start class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy --> |
| <class name="LzoCompressor.CompressionStrategy" extends="java.lang.Enum<org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <doc> |
| <![CDATA[The compression algorithm for lzo library.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.lzo.LzoCompressor.CompressionStrategy --> |
| <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor --> |
| <class name="LzoDecompressor" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.compress.Decompressor"/> |
| <constructor name="LzoDecompressor" type="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new lzo decompressor. |
| |
| @param strategy lzo decompression algorithm |
| @param directBufferSize size of the direct-buffer]]> |
| </doc> |
| </constructor> |
| <constructor name="LzoDecompressor" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new lzo decompressor.]]> |
| </doc> |
| </constructor> |
| <method name="isNativeLzoLoaded" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Check if lzo decompressors are loaded and initialized. |
| |
| @return <code>true</code> if lzo decompressors are loaded & initialized, |
| else <code>false</code>]]> |
| </doc> |
| </method> |
| <method name="setInput" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| </method> |
| <method name="setDictionary" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| </method> |
| <method name="needsInput" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="needsDictionary" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="finished" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="decompress" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="end" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="finalize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A {@link Decompressor} based on the lzo algorithm. |
| http://www.oberhumer.com/opensource/lzo/]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor --> |
| <!-- start class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy --> |
| <class name="LzoDecompressor.CompressionStrategy" extends="java.lang.Enum<org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.lzo.LzoDecompressor.CompressionStrategy --> |
| </package> |
| <package name="org.apache.hadoop.io.compress.zlib"> |
| <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater --> |
| <class name="BuiltInZlibDeflater" extends="java.util.zip.Deflater" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.compress.Compressor"/> |
| <constructor name="BuiltInZlibDeflater" type="int, boolean" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="BuiltInZlibDeflater" type="int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="BuiltInZlibDeflater" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="compress" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A wrapper around java.util.zip.Deflater to make it conform |
| to org.apache.hadoop.io.compress.Compressor interface.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater --> |
| <!-- start class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater --> |
| <class name="BuiltInZlibInflater" extends="java.util.zip.Inflater" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.compress.Decompressor"/> |
| <constructor name="BuiltInZlibInflater" type="boolean" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="BuiltInZlibInflater" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="decompress" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A wrapper around java.util.zip.Inflater to make it conform |
| to org.apache.hadoop.io.compress.Decompressor interface.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater --> |
| <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor --> |
| <class name="ZlibCompressor" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.compress.Compressor"/> |
| <constructor name="ZlibCompressor" type="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy, org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new compressor using the specified compression level. |
| Compressed data will be generated in ZLIB format. |
| |
| @param level Compression level #CompressionLevel |
| @param strategy Compression strategy #CompressionStrategy |
| @param header Compression header #CompressionHeader |
| @param directBufferSize Size of the direct buffer to be used.]]> |
| </doc> |
| </constructor> |
| <constructor name="ZlibCompressor" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new compressor with the default compression level. |
| Compressed data will be generated in ZLIB format.]]> |
| </doc> |
| </constructor> |
| <method name="setInput" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| </method> |
| <method name="setDictionary" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| </method> |
| <method name="needsInput" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="finish" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="finished" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compress" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getBytesWritten" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the total number of compressed bytes output so far. |
| |
| @return the total (non-negative) number of compressed bytes output so far]]> |
| </doc> |
| </method> |
| <method name="getBytesRead" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[Returns the total number of uncompressed bytes input so far.
| |
| @return the total (non-negative) number of uncompressed bytes input so far]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="end" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A {@link Compressor} based on the popular |
| zlib compression algorithm. |
| http://www.zlib.net/]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor --> |
| <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader --> |
| <class name="ZlibCompressor.CompressionHeader" extends="java.lang.Enum<org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <method name="windowBits" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[The type of header for compressed data.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionHeader --> |
| <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel --> |
| <class name="ZlibCompressor.CompressionLevel" extends="java.lang.Enum<org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <doc> |
| <![CDATA[The compression level for zlib library.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionLevel --> |
| <!-- start class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy --> |
| <class name="ZlibCompressor.CompressionStrategy" extends="java.lang.Enum<org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <doc> |
<![CDATA[The compression strategy for zlib library.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.zlib.ZlibCompressor.CompressionStrategy --> |
| <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor --> |
| <class name="ZlibDecompressor" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.compress.Decompressor"/> |
| <constructor name="ZlibDecompressor" type="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new decompressor.]]> |
| </doc> |
| </constructor> |
| <constructor name="ZlibDecompressor" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setInput" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| </method> |
| <method name="setDictionary" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| </method> |
| <method name="needsInput" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="needsDictionary" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="finished" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="decompress" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getBytesWritten" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[Returns the total number of uncompressed bytes output so far.

@return the total (non-negative) number of uncompressed bytes output so far]]>
| </doc> |
| </method> |
| <method name="getBytesRead" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[Returns the total number of compressed bytes input so far.

@return the total (non-negative) number of compressed bytes input so far]]>
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="end" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="finalize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A {@link Decompressor} based on the popular |
| zlib compression algorithm. |
| http://www.zlib.net/]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor --> |
| <!-- start class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader --> |
| <class name="ZlibDecompressor.CompressionHeader" extends="java.lang.Enum<org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <method name="windowBits" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[The headers to detect from compressed data.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.zlib.ZlibDecompressor.CompressionHeader --> |
| <!-- start class org.apache.hadoop.io.compress.zlib.ZlibFactory --> |
| <class name="ZlibFactory" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ZlibFactory" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="isNativeZlibLoaded" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Check if native-zlib code is loaded & initialized correctly and |
| can be loaded for this job. |
| |
| @param conf configuration |
| @return <code>true</code> if native-zlib is loaded & initialized |
| and can be loaded for this job, else <code>false</code>]]> |
| </doc> |
| </method> |
| <method name="getZlibCompressorType" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Return the appropriate type of the zlib compressor. |
| |
| @param conf configuration |
| @return the appropriate type of the zlib compressor.]]> |
| </doc> |
| </method> |
| <method name="getZlibCompressor" return="org.apache.hadoop.io.compress.Compressor" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Return the appropriate implementation of the zlib compressor. |
| |
| @param conf configuration |
| @return the appropriate implementation of the zlib compressor.]]> |
| </doc> |
| </method> |
| <method name="getZlibDecompressorType" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Return the appropriate type of the zlib decompressor. |
| |
| @param conf configuration |
| @return the appropriate type of the zlib decompressor.]]> |
| </doc> |
| </method> |
| <method name="getZlibDecompressor" return="org.apache.hadoop.io.compress.Decompressor" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Return the appropriate implementation of the zlib decompressor. |
| |
| @param conf configuration |
| @return the appropriate implementation of the zlib decompressor.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A collection of factories to create the right |
| zlib/gzip compressor/decompressor instances.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.compress.zlib.ZlibFactory --> |
| </package> |
| <package name="org.apache.hadoop.io.retry"> |
| <!-- start class org.apache.hadoop.io.retry.RetryPolicies --> |
| <class name="RetryPolicies" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="RetryPolicies" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="retryUpToMaximumCountWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="maxRetries" type="int"/> |
| <param name="sleepTime" type="long"/> |
| <param name="timeUnit" type="java.util.concurrent.TimeUnit"/> |
| <doc> |
| <![CDATA[<p> |
| Keep trying a limited number of times, waiting a fixed time between attempts, |
| and then fail by re-throwing the exception. |
| </p>]]> |
| </doc> |
| </method> |
| <method name="retryUpToMaximumTimeWithFixedSleep" return="org.apache.hadoop.io.retry.RetryPolicy" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="maxTime" type="long"/> |
| <param name="sleepTime" type="long"/> |
| <param name="timeUnit" type="java.util.concurrent.TimeUnit"/> |
| <doc> |
| <![CDATA[<p> |
| Keep trying for a maximum time, waiting a fixed time between attempts, |
| and then fail by re-throwing the exception. |
| </p>]]> |
| </doc> |
| </method> |
| <method name="retryUpToMaximumCountWithProportionalSleep" return="org.apache.hadoop.io.retry.RetryPolicy" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="maxRetries" type="int"/> |
| <param name="sleepTime" type="long"/> |
| <param name="timeUnit" type="java.util.concurrent.TimeUnit"/> |
| <doc> |
| <![CDATA[<p> |
| Keep trying a limited number of times, waiting a growing amount of time between attempts, |
| and then fail by re-throwing the exception. |
The time between attempts is <code>sleepTime</code> multiplied by the number of tries so far.
| </p>]]> |
| </doc> |
| </method> |
| <method name="exponentialBackoffRetry" return="org.apache.hadoop.io.retry.RetryPolicy" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="maxRetries" type="int"/> |
| <param name="sleepTime" type="long"/> |
| <param name="timeUnit" type="java.util.concurrent.TimeUnit"/> |
| <doc> |
| <![CDATA[<p> |
| Keep trying a limited number of times, waiting a growing amount of time between attempts, |
| and then fail by re-throwing the exception. |
The time between attempts is <code>sleepTime</code> multiplied by a random
| number in the range of [0, 2 to the number of retries) |
| </p>]]> |
| </doc> |
| </method> |
| <method name="retryByException" return="org.apache.hadoop.io.retry.RetryPolicy" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/> |
| <param name="exceptionToPolicyMap" type="java.util.Map<java.lang.Class<? extends java.lang.Exception>, org.apache.hadoop.io.retry.RetryPolicy>"/> |
| <doc> |
| <![CDATA[<p> |
| Set a default policy with some explicit handlers for specific exceptions. |
| </p>]]> |
| </doc> |
| </method> |
| <method name="retryByRemoteException" return="org.apache.hadoop.io.retry.RetryPolicy" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="defaultPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/> |
| <param name="exceptionToPolicyMap" type="java.util.Map<java.lang.Class<? extends java.lang.Exception>, org.apache.hadoop.io.retry.RetryPolicy>"/> |
| <doc> |
| <![CDATA[<p> |
| A retry policy for RemoteException |
| Set a default policy with some explicit handlers for specific exceptions. |
| </p>]]> |
| </doc> |
| </method> |
| <field name="TRY_ONCE_THEN_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[<p> |
| Try once, and fail by re-throwing the exception. |
| This corresponds to having no retry mechanism in place. |
| </p>]]> |
| </doc> |
| </field> |
| <field name="TRY_ONCE_DONT_FAIL" type="org.apache.hadoop.io.retry.RetryPolicy" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[<p> |
| Try once, and fail silently for <code>void</code> methods, or by |
| re-throwing the exception for non-<code>void</code> methods. |
| </p>]]> |
| </doc> |
| </field> |
| <field name="RETRY_FOREVER" type="org.apache.hadoop.io.retry.RetryPolicy" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[<p> |
| Keep trying forever. |
| </p>]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[<p> |
| A collection of useful implementations of {@link RetryPolicy}. |
| </p>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.retry.RetryPolicies --> |
| <!-- start interface org.apache.hadoop.io.retry.RetryPolicy --> |
| <interface name="RetryPolicy" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="shouldRetry" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="e" type="java.lang.Exception"/> |
| <param name="retries" type="int"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[<p> |
| Determines whether the framework should retry a |
| method for the given exception, and the number |
| of retries that have been made for that operation |
| so far. |
| </p> |
| @param e The exception that caused the method to fail. |
| @param retries The number of times the method has been retried. |
| @return <code>true</code> if the method should be retried, |
| <code>false</code> if the method should not be retried |
| but shouldn't fail with an exception (only for void methods). |
| @throws Exception The re-thrown exception <code>e</code> indicating |
| that the method failed and should not be retried further.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<p> |
| Specifies a policy for retrying method failures. |
| Implementations of this interface should be immutable. |
| </p>]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.retry.RetryPolicy --> |
| <!-- start class org.apache.hadoop.io.retry.RetryProxy --> |
| <class name="RetryProxy" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="RetryProxy" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="create" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="iface" type="java.lang.Class<?>"/> |
| <param name="implementation" type="java.lang.Object"/> |
| <param name="retryPolicy" type="org.apache.hadoop.io.retry.RetryPolicy"/> |
| <doc> |
| <![CDATA[<p> |
| Create a proxy for an interface of an implementation class |
| using the same retry policy for each method in the interface. |
| </p> |
| @param iface the interface that the retry will implement |
| @param implementation the instance whose methods should be retried |
@param retryPolicy the policy for retrying method call failures
| @return the retry proxy]]> |
| </doc> |
| </method> |
| <method name="create" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="iface" type="java.lang.Class<?>"/> |
| <param name="implementation" type="java.lang.Object"/> |
| <param name="methodNameToPolicyMap" type="java.util.Map<java.lang.String, org.apache.hadoop.io.retry.RetryPolicy>"/> |
| <doc> |
| <![CDATA[<p> |
| Create a proxy for an interface of an implementation class |
using a set of retry policies specified by method name.
| If no retry policy is defined for a method then a default of |
| {@link RetryPolicies#TRY_ONCE_THEN_FAIL} is used. |
| </p> |
| @param iface the interface that the retry will implement |
| @param implementation the instance whose methods should be retried |
| @param methodNameToPolicyMap a map of method names to retry policies |
| @return the retry proxy]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<p> |
| A factory for creating retry proxies. |
| </p>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.retry.RetryProxy --> |
| <doc> |
| <![CDATA[<p> |
| A mechanism for selectively retrying methods that throw exceptions under certain circumstances. |
| </p> |
| |
| <p> |
| Typical usage is |
| </p> |
| |
| <pre> |
| UnreliableImplementation unreliableImpl = new UnreliableImplementation(); |
| UnreliableInterface unreliable = (UnreliableInterface) |
| RetryProxy.create(UnreliableInterface.class, unreliableImpl, |
| RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10, TimeUnit.SECONDS)); |
| unreliable.call(); |
| </pre> |
| |
| <p> |
| This will retry any method called on <code>unreliable</code> four times - in this case the <code>call()</code> |
| method - sleeping 10 seconds between |
| each retry. There are a number of {@link org.apache.hadoop.io.retry.RetryPolicies retry policies} |
| available, or you can implement a custom one by implementing {@link org.apache.hadoop.io.retry.RetryPolicy}. |
| It is also possible to specify retry policies on a |
| {@link org.apache.hadoop.io.retry.RetryProxy#create(Class, Object, Map) per-method basis}. |
| </p>]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.io.serializer"> |
| <!-- start interface org.apache.hadoop.io.serializer.Deserializer --> |
| <interface name="Deserializer" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="open" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.InputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[<p>Prepare the deserializer for reading.</p>]]> |
| </doc> |
| </method> |
| <method name="deserialize" return="T" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="t" type="T"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[<p> |
| Deserialize the next object from the underlying input stream. |
| If the object <code>t</code> is non-null then this deserializer |
| <i>may</i> set its internal state to the next object read from the input |
| stream. Otherwise, if the object <code>t</code> is null a new |
| deserialized object will be created. |
| </p> |
| @return the deserialized object]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[<p>Close the underlying input stream and clear up any resources.</p>]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<p> |
| Provides a facility for deserializing objects of type <T> from an |
| {@link InputStream}. |
| </p> |
| |
| <p> |
| Deserializers are stateful, but must not buffer the input since |
| other producers may read from the input between calls to |
| {@link #deserialize(Object)}. |
| </p> |
| @param <T>]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.serializer.Deserializer --> |
| <!-- start class org.apache.hadoop.io.serializer.DeserializerComparator --> |
| <class name="DeserializerComparator" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.RawComparator<T>"/> |
| <constructor name="DeserializerComparator" type="org.apache.hadoop.io.serializer.Deserializer<T>" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| </method> |
| <doc> |
| <![CDATA[<p> |
| A {@link RawComparator} that uses a {@link Deserializer} to deserialize |
| the objects to be compared so that the standard {@link Comparator} can |
| be used to compare them. |
| </p> |
| <p> |
| One may optimize compare-intensive operations by using a custom |
| implementation of {@link RawComparator} that operates directly |
| on byte representations. |
| </p> |
| @param <T>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.serializer.DeserializerComparator --> |
| <!-- start class org.apache.hadoop.io.serializer.JavaSerialization --> |
| <class name="JavaSerialization" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.serializer.Serialization<java.io.Serializable>"/> |
| <constructor name="JavaSerialization" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="accept" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<?>"/> |
| </method> |
| <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer<java.io.Serializable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<java.io.Serializable>"/> |
| </method> |
| <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer<java.io.Serializable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<java.io.Serializable>"/> |
| </method> |
| <doc> |
| <![CDATA[<p> |
| An experimental {@link Serialization} for Java {@link Serializable} classes. |
| </p> |
| @see JavaSerializationComparator]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.serializer.JavaSerialization --> |
| <!-- start class org.apache.hadoop.io.serializer.JavaSerializationComparator --> |
| <class name="JavaSerializationComparator" extends="org.apache.hadoop.io.serializer.DeserializerComparator<T>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JavaSerializationComparator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o1" type="T extends java.io.Serializable & java.lang.Comparable<T>"/> |
| <param name="o2" type="T extends java.io.Serializable & java.lang.Comparable<T>"/> |
| </method> |
| <doc> |
| <![CDATA[<p> |
| A {@link RawComparator} that uses a {@link JavaSerialization} |
| {@link Deserializer} to deserialize objects that are then compared via |
| their {@link Comparable} interfaces. |
| </p> |
| @param <T> |
| @see JavaSerialization]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.serializer.JavaSerializationComparator --> |
| <!-- start interface org.apache.hadoop.io.serializer.Serialization --> |
| <interface name="Serialization" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="accept" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<?>"/> |
| <doc> |
| <![CDATA[Allows clients to test whether this {@link Serialization} |
| supports the given class.]]> |
| </doc> |
| </method> |
| <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer<T>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<T>"/> |
| <doc> |
| <![CDATA[@return a {@link Serializer} for the given class.]]> |
| </doc> |
| </method> |
| <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer<T>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<T>"/> |
| <doc> |
| <![CDATA[@return a {@link Deserializer} for the given class.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<p> |
| Encapsulates a {@link Serializer}/{@link Deserializer} pair. |
| </p> |
| @param <T>]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.serializer.Serialization --> |
| <!-- start class org.apache.hadoop.io.serializer.SerializationFactory --> |
| <class name="SerializationFactory" extends="org.apache.hadoop.conf.Configured" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SerializationFactory" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[<p> |
| Serializations are found by reading the <code>io.serializations</code> |
| property from <code>conf</code>, which is a comma-delimited list of |
| classnames. |
| </p>]]> |
| </doc> |
| </constructor> |
| <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer<T>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<T>"/> |
| </method> |
| <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer<T>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<T>"/> |
| </method> |
| <method name="getSerialization" return="org.apache.hadoop.io.serializer.Serialization<T>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<T>"/> |
| </method> |
| <doc> |
| <![CDATA[<p> |
| A factory for {@link Serialization}s. |
| </p>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.serializer.SerializationFactory --> |
| <!-- start interface org.apache.hadoop.io.serializer.Serializer --> |
| <interface name="Serializer" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="open" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.OutputStream"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[<p>Prepare the serializer for writing.</p>]]> |
| </doc> |
| </method> |
| <method name="serialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="t" type="T"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[<p>Serialize <code>t</code> to the underlying output stream.</p>]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[<p>Close the underlying output stream and clear up any resources.</p>]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<p> |
| Provides a facility for serializing objects of type <T> to an |
| {@link OutputStream}. |
| </p> |
| |
| <p> |
| Serializers are stateful, but must not buffer the output since |
| other producers may write to the output between calls to |
| {@link #serialize(Object)}. |
| </p> |
| @param <T>]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.io.serializer.Serializer --> |
| <!-- start class org.apache.hadoop.io.serializer.WritableSerialization --> |
| <class name="WritableSerialization" extends="org.apache.hadoop.conf.Configured" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.serializer.Serialization<org.apache.hadoop.io.Writable>"/> |
| <constructor name="WritableSerialization" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="accept" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<?>"/> |
| </method> |
| <method name="getDeserializer" return="org.apache.hadoop.io.serializer.Deserializer<org.apache.hadoop.io.Writable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<org.apache.hadoop.io.Writable>"/> |
| </method> |
| <method name="getSerializer" return="org.apache.hadoop.io.serializer.Serializer<org.apache.hadoop.io.Writable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<org.apache.hadoop.io.Writable>"/> |
| </method> |
| <doc> |
| <![CDATA[A {@link Serialization} for {@link Writable}s that delegates to |
| {@link Writable#write(java.io.DataOutput)} and |
| {@link Writable#readFields(java.io.DataInput)}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.io.serializer.WritableSerialization --> |
| <doc> |
| <![CDATA[<p> |
| This package provides a mechanism for using different serialization frameworks |
| in Hadoop. The property "io.serializations" defines a list of |
| {@link org.apache.hadoop.io.serializer.Serialization}s that know how to create |
| {@link org.apache.hadoop.io.serializer.Serializer}s and |
| {@link org.apache.hadoop.io.serializer.Deserializer}s. |
| </p> |
| |
| <p> |
| To add a new serialization framework write an implementation of |
| {@link org.apache.hadoop.io.serializer.Serialization} and add its name to the |
| "io.serializations" property. |
| </p>]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.ipc"> |
| <!-- start class org.apache.hadoop.ipc.Client --> |
| <class name="Client" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Client" type="java.lang.Class, org.apache.hadoop.conf.Configuration, javax.net.SocketFactory" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct an IPC client whose values are of the given {@link Writable} |
| class.]]> |
| </doc> |
| </constructor> |
| <constructor name="Client" type="java.lang.Class<?>, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct an IPC client with the default SocketFactory |
| @param valueClass |
| @param conf]]> |
| </doc> |
| </constructor> |
| <method name="stop" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Stop all threads related to this client. No further calls may be made |
| using this client.]]> |
| </doc> |
| </method> |
| <method name="setTimeout" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="timeout" type="int"/> |
| <doc> |
| <![CDATA[Sets the timeout used for network i/o.]]> |
| </doc> |
| </method> |
| <method name="call" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="param" type="org.apache.hadoop.io.Writable"/> |
| <param name="address" type="java.net.InetSocketAddress"/> |
| <exception name="InterruptedException" type="java.lang.InterruptedException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Make a call, passing <code>param</code>, to the IPC server running at |
| <code>address</code>, returning the value. Throws exceptions if there are |
| network problems or if the remote code threw an exception.]]> |
| </doc> |
| </method> |
| <method name="call" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="param" type="org.apache.hadoop.io.Writable"/> |
| <param name="addr" type="java.net.InetSocketAddress"/> |
| <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/> |
| <exception name="InterruptedException" type="java.lang.InterruptedException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="call" return="org.apache.hadoop.io.Writable[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="params" type="org.apache.hadoop.io.Writable[]"/> |
| <param name="addresses" type="java.net.InetSocketAddress[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Makes a set of calls in parallel. Each parameter is sent to the |
| corresponding address. When all values are available, or have timed out |
| or errored, the collected results are returned in an array. The array |
| contains nulls for calls that timed out or errored.]]> |
| </doc> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[A client for an IPC service. IPC calls take a single {@link Writable} as a |
| parameter, and return a {@link Writable} as their value. A service runs on |
| a port and is defined by a parameter class and a value class. |
| |
| @see Server]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.ipc.Client --> |
| <!-- start class org.apache.hadoop.ipc.RemoteException --> |
| <class name="RemoteException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="RemoteException" type="java.lang.String, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getClassName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="unwrapRemoteException" return="java.io.IOException" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="lookupTypes" type="java.lang.Class[]"/> |
| <doc> |
| <![CDATA[If this remote exception wraps up one of the lookupTypes |
| then return this exception. |
| <p> |
| Unwraps any IOException. |
| |
| @param lookupTypes the desired exception class. |
| @return IOException, which is either the lookupClass exception or this.]]> |
| </doc> |
| </method> |
| <method name="unwrapRemoteException" return="java.io.IOException" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Instantiate and return the exception wrapped up by this remote exception. |
| |
| <p> This unwraps any <code>Throwable</code> that has a constructor taking |
| a <code>String</code> as a parameter. |
| Otherwise it returns this. |
| |
| @return <code>Throwable</code>]]> |
| </doc> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.ipc.RemoteException --> |
| <!-- start class org.apache.hadoop.ipc.RPC --> |
| <class name="RPC" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="waitForProxy" return="org.apache.hadoop.ipc.VersionedProtocol" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="protocol" type="java.lang.Class"/> |
| <param name="clientVersion" type="long"/> |
| <param name="addr" type="java.net.InetSocketAddress"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="protocol" type="java.lang.Class<?>"/> |
| <param name="clientVersion" type="long"/> |
| <param name="addr" type="java.net.InetSocketAddress"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="factory" type="javax.net.SocketFactory"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a client-side proxy object that implements the named protocol, |
| talking to a server at the named address.]]> |
| </doc> |
| </method> |
| <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="protocol" type="java.lang.Class<?>"/> |
| <param name="clientVersion" type="long"/> |
| <param name="addr" type="java.net.InetSocketAddress"/> |
| <param name="ticket" type="org.apache.hadoop.security.UserGroupInformation"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="factory" type="javax.net.SocketFactory"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a client-side proxy object that implements the named protocol, |
| talking to a server at the named address.]]> |
| </doc> |
| </method> |
| <method name="getProxy" return="org.apache.hadoop.ipc.VersionedProtocol" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="protocol" type="java.lang.Class<?>"/> |
| <param name="clientVersion" type="long"/> |
| <param name="addr" type="java.net.InetSocketAddress"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a client-side proxy object with the default SocketFactory |
| |
| @param protocol |
| @param clientVersion |
| @param addr |
| @param conf |
| @return a proxy instance |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="stopProxy" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="proxy" type="org.apache.hadoop.ipc.VersionedProtocol"/> |
| <doc> |
| <![CDATA[Stop this proxy and release its invoker's resource |
| @param proxy the proxy to be stopped]]> |
| </doc> |
| </method> |
| <method name="call" return="java.lang.Object[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="method" type="java.lang.reflect.Method"/> |
| <param name="params" type="java.lang.Object[][]"/> |
| <param name="addrs" type="java.net.InetSocketAddress[]"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Expert: Make multiple, parallel calls to a set of servers.]]> |
| </doc> |
| </method> |
| <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="instance" type="java.lang.Object"/> |
| <param name="bindAddress" type="java.lang.String"/> |
| <param name="port" type="int"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a server for a protocol implementation instance listening on a |
| port and address.]]> |
| </doc> |
| </method> |
| <method name="getServer" return="org.apache.hadoop.ipc.RPC.Server" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="instance" type="java.lang.Object"/> |
| <param name="bindAddress" type="java.lang.String"/> |
| <param name="port" type="int"/> |
| <param name="numHandlers" type="int"/> |
| <param name="verbose" type="boolean"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a server for a protocol implementation instance listening on a |
| port and address.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A simple RPC mechanism. |
| |
| A <i>protocol</i> is a Java interface. All parameters and return types must |
| be one of: |
| |
| <ul> <li>a primitive type, <code>boolean</code>, <code>byte</code>, |
| <code>char</code>, <code>short</code>, <code>int</code>, <code>long</code>, |
| <code>float</code>, <code>double</code>, or <code>void</code>; or</li> |
| |
| <li>a {@link String}; or</li> |
| |
| <li>a {@link Writable}; or</li> |
| |
| <li>an array of the above types</li> </ul> |
| |
| All methods in the protocol should throw only IOException. No field data of |
| the protocol instance is transmitted.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.ipc.RPC --> |
| <!-- start class org.apache.hadoop.ipc.RPC.Server --> |
| <class name="RPC.Server" extends="org.apache.hadoop.ipc.Server" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct an RPC server. |
| @param instance the instance whose methods will be called |
| @param conf the configuration to use |
| @param bindAddress the address to bind on to listen for connection |
| @param port the port to listen for connections on]]> |
| </doc> |
| </constructor> |
| <constructor name="RPC.Server" type="java.lang.Object, org.apache.hadoop.conf.Configuration, java.lang.String, int, int, boolean" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct an RPC server. |
| @param instance the instance whose methods will be called |
| @param conf the configuration to use |
| @param bindAddress the address to bind on to listen for connection |
| @param port the port to listen for connections on |
| @param numHandlers the number of method handler threads to run |
| @param verbose whether each call should be logged]]> |
| </doc> |
| </constructor> |
| <method name="call" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="param" type="org.apache.hadoop.io.Writable"/> |
| <param name="receivedTime" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[An RPC Server.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.ipc.RPC.Server --> |
| <!-- start class org.apache.hadoop.ipc.RPC.VersionMismatch --> |
| <class name="RPC.VersionMismatch" extends="java.io.IOException" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="RPC.VersionMismatch" type="java.lang.String, long, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a version mismatch exception |
| @param interfaceName the name of the protocol mismatch |
| @param clientVersion the client's version of the protocol |
| @param serverVersion the server's version of the protocol]]> |
| </doc> |
| </constructor> |
| <method name="getInterfaceName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the interface name |
| @return the java class name |
| (eg. org.apache.hadoop.mapred.InterTrackerProtocol)]]> |
| </doc> |
| </method> |
| <method name="getClientVersion" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the client's preferred version]]> |
| </doc> |
| </method> |
| <method name="getServerVersion" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the server's agreed to version.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A version mismatch for the RPC protocol.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.ipc.RPC.VersionMismatch --> |
| <!-- start class org.apache.hadoop.ipc.Server --> |
| <class name="Server" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Server" type="java.lang.String, int, java.lang.Class, int, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <constructor name="Server" type="java.lang.String, int, java.lang.Class, int, org.apache.hadoop.conf.Configuration, java.lang.String" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Constructs a server listening on the named port and address. Parameters passed must |
| be of the named class. The <code>handlerCount</code> determines |
| the number of handler threads that will be used to process calls.]]> |
| </doc> |
| </constructor> |
| <method name="get" return="org.apache.hadoop.ipc.Server" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the server instance called under or null. May be called under |
| {@link #call(Writable, long)} implementations, and under {@link Writable} |
| methods of parameters and return values. Permits applications to access |
| the server context.]]> |
| </doc> |
| </method> |
| <method name="getRemoteIp" return="java.net.InetAddress" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the remote side ip address when invoked inside an RPC |
| Returns null in case of an error.]]> |
| </doc> |
| </method> |
| <method name="getRemoteAddress" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns remote address as a string when invoked inside an RPC. |
| Returns null in case of an error.]]> |
| </doc> |
| </method> |
| <method name="bind" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="socket" type="java.net.ServerSocket"/> |
| <param name="address" type="java.net.InetSocketAddress"/> |
| <param name="backlog" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[A convenience method to bind to a given address and report |
| better exceptions if the address is not a valid host. |
| @param socket the socket to bind |
| @param address the address to bind to |
| @param backlog the number of connections allowed in the queue |
| @throws BindException if the address can't be bound |
| @throws UnknownHostException if the address isn't a valid host name |
| @throws IOException other random errors from bind]]> |
| </doc> |
| </method> |
| <method name="setTimeout" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="timeout" type="int"/> |
| <doc> |
| <![CDATA[Sets the timeout used for network i/o.]]> |
| </doc> |
| </method> |
| <method name="setSocketSendBufSize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="size" type="int"/> |
| <doc> |
| <![CDATA[Sets the socket buffer size used for responding to RPCs]]> |
| </doc> |
| </method> |
| <method name="start" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Starts the service. Must be called before any calls will be handled.]]> |
| </doc> |
| </method> |
| <method name="stop" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Stops the service. No new calls will be handled after this is called.]]> |
| </doc> |
| </method> |
| <method name="join" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="InterruptedException" type="java.lang.InterruptedException"/> |
| <doc> |
| <![CDATA[Wait for the server to be stopped. |
| Does not wait for all subthreads to finish. |
| See {@link #stop()}.]]> |
| </doc> |
| </method> |
| <method name="getListenerAddress" return="java.net.InetSocketAddress" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the socket (ip+port) on which the RPC server is listening to. |
| @return the socket (ip+port) on which the RPC server is listening to.]]> |
| </doc> |
| </method> |
| <method name="call" return="org.apache.hadoop.io.Writable" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="param" type="org.apache.hadoop.io.Writable"/> |
| <param name="receiveTime" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Called for each call.]]> |
| </doc> |
| </method> |
| <method name="getNumOpenConnections" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The number of open RPC connections |
| @return the number of open rpc connections]]> |
| </doc> |
| </method> |
| <method name="getCallQueueLen" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The number of rpc calls in the queue. |
| @return The number of rpc calls in the queue.]]> |
| </doc> |
| </method> |
| <field name="HEADER" type="java.nio.ByteBuffer" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The first four bytes of Hadoop RPC connections]]> |
| </doc> |
| </field> |
| <field name="CURRENT_VERSION" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="rpcMetrics" type="org.apache.hadoop.ipc.metrics.RpcMetrics" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[An abstract IPC service. IPC calls take a single {@link Writable} as a |
| parameter, and return a {@link Writable} as their value. A service runs on |
| a port and is defined by a parameter class and a value class. |
| |
| @see Client]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.ipc.Server --> |
| <!-- start interface org.apache.hadoop.ipc.VersionedProtocol --> |
| <interface name="VersionedProtocol" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getProtocolVersion" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="protocol" type="java.lang.String"/> |
| <param name="clientVersion" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return protocol version corresponding to protocol interface. |
| @param protocol The classname of the protocol interface |
| @param clientVersion The version of the protocol that the client speaks |
| @return the version that the server will speak]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Superclass of all protocols that use Hadoop RPC. |
| Subclasses of this interface are also supposed to have |
| a static final long versionID field.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.ipc.VersionedProtocol --> |
| <doc> |
| <![CDATA[Tools to help define network clients and servers.]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.ipc.metrics"> |
| <!-- start class org.apache.hadoop.ipc.metrics.RpcMetrics --> |
| <class name="RpcMetrics" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.metrics.Updater"/> |
| <constructor name="RpcMetrics" type="java.lang.String, java.lang.String, org.apache.hadoop.ipc.Server" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doUpdates" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/> |
| <doc> |
| <![CDATA[Push the metrics to the monitoring subsystem on doUpdate() call.]]> |
| </doc> |
| </method> |
| <method name="shutdown" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <field name="rpcQueueTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The metrics variables are public: |
| - they can be set directly by calling their set/inc methods |
| - they can also be read directly - e.g. JMX does this.]]> |
| </doc> |
| </field> |
| <field name="rpcProcessingTime" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="rpcDiscardedOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="metricsList" type="java.util.Map<java.lang.String, org.apache.hadoop.metrics.util.MetricsTimeVaryingRate>" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[This class is for maintaining the various RPC statistics |
| and publishing them through the metrics interfaces. |
| This also registers the JMX MBean for RPC. |
| <p> |
| This class has a number of metrics variables that are publicly accessible; |
| these variables (objects) have methods to update their values; |
| for example: |
| <p> {@link #rpcDiscardedOps}.inc(time)]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.ipc.metrics.RpcMetrics --> |
| <!-- start interface org.apache.hadoop.ipc.metrics.RpcMgtMBean --> |
| <interface name="RpcMgtMBean" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getRpcOpsNumber" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of RPC Operations in the last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getRpcOpsAvgProcessingTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Average time for RPC Operations in last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getRpcOpsAvgProcessingTimeMin" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Minimum RPC Operation Processing Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getRpcOpsAvgProcessingTimeMax" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Maximum RPC Operation Processing Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getRpcOpsAvgQueueTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Average RPC Operation Queued Time in the last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getRpcOpsAvgQueueTimeMin" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Minimum RPC Operation Queued Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getRpcOpsAvgQueueTimeMax" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Maximum RPC Operation Queued Time since reset was called |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="getRpcOpsDiscardedOpsNum" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Number of Discarded RPC operations due to timeout in the last interval |
| @return number of operations]]> |
| </doc> |
| </method> |
| <method name="getRpcOpsDiscardedOpsQtime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Average Queued time for Discarded RPC Operations in last interval |
| @return time in msec]]> |
| </doc> |
| </method> |
| <method name="resetAllMinMax" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Reset all min max times]]> |
| </doc> |
| </method> |
| <method name="getNumOpenConnections" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
      <![CDATA[The number of open RPC connections
| @return the number of open rpc connections]]> |
| </doc> |
| </method> |
| <method name="getCallQueueLen" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The number of rpc calls in the queue. |
| @return The number of rpc calls in the queue.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This is the JMX management interface for the RPC layer. |
| Many of the statistics are sampled and averaged on an interval |
| which can be specified in the metrics config file. |
| <p> |
| For the statistics that are sampled and averaged, one must specify |
| a metrics context that does periodic update calls. Most do. |
| The default Null metrics context however does NOT. So if you aren't |
| using any other metrics context then you can turn on the viewing and averaging |
| of sampled metrics by specifying the following two lines |
 in the hadoop-metrics.properties file:
| <pre> |
| rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread |
| rpc.period=10 |
| </pre> |
| <p> |
| Note that the metrics are collected regardless of the context used. |
| The context with the update thread is used to average the data periodically]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.ipc.metrics.RpcMgtMBean --> |
| </package> |
| <package name="org.apache.hadoop.log"> |
| <!-- start class org.apache.hadoop.log.LogLevel --> |
| <class name="LogLevel" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="LogLevel" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <doc> |
| <![CDATA[A command line implementation]]> |
| </doc> |
| </method> |
| <field name="USAGES" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Change log level in runtime.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.log.LogLevel --> |
| <!-- start class org.apache.hadoop.log.LogLevel.Servlet --> |
| <class name="LogLevel.Servlet" extends="javax.servlet.http.HttpServlet" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="LogLevel.Servlet" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doGet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="response" type="javax.servlet.http.HttpServletResponse"/> |
| <exception name="ServletException" type="javax.servlet.ServletException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A servlet implementation]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.log.LogLevel.Servlet --> |
| </package> |
| <package name="org.apache.hadoop.mapred"> |
| <!-- start class org.apache.hadoop.mapred.ClusterStatus --> |
| <class name="ClusterStatus" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <method name="getTaskTrackers" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the number of task trackers in the cluster. |
| |
| @return the number of task trackers in the cluster.]]> |
| </doc> |
| </method> |
| <method name="getMapTasks" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the number of currently running map tasks in the cluster. |
| |
| @return the number of currently running map tasks in the cluster.]]> |
| </doc> |
| </method> |
| <method name="getReduceTasks" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the number of currently running reduce tasks in the cluster. |
| |
| @return the number of currently running reduce tasks in the cluster.]]> |
| </doc> |
| </method> |
| <method name="getMaxMapTasks" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the maximum capacity for running map tasks in the cluster. |
| |
| @return the maximum capacity for running map tasks in the cluster.]]> |
| </doc> |
| </method> |
| <method name="getMaxReduceTasks" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the maximum capacity for running reduce tasks in the cluster. |
| |
| @return the maximum capacity for running reduce tasks in the cluster.]]> |
| </doc> |
| </method> |
| <method name="getJobTrackerState" return="org.apache.hadoop.mapred.JobTracker.State" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the current state of the <code>JobTracker</code>, |
| as {@link JobTracker.State} |
| |
| @return the current state of the <code>JobTracker</code>.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[Status information on the current state of the Map-Reduce cluster. |
| |
| <p><code>ClusterStatus</code> provides clients with information such as: |
| <ol> |
| <li> |
| Size of the cluster. |
| </li> |
| <li> |
| Task capacity of the cluster. |
| </li> |
| <li> |
| The number of currently running map & reduce tasks. |
| </li> |
| <li> |
| State of the <code>JobTracker</code>. |
| </li> |
| </ol></p> |
| |
| <p>Clients can query for the latest <code>ClusterStatus</code>, via |
| {@link JobClient#getClusterStatus()}.</p> |
| |
| @see JobClient]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.ClusterStatus --> |
| <!-- start class org.apache.hadoop.mapred.CompletedJobStatusStore --> |
| <class name="CompletedJobStatusStore" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.lang.Runnable"/> |
| <method name="isActive" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Indicates if job status persistency is active or not. |
| |
| @return TRUE if active, FALSE otherwise.]]> |
| </doc> |
| </method> |
| <method name="run" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="store" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobInProgress"/> |
| <doc> |
| <![CDATA[Persists a job in DFS. |
| |
| @param job the job about to be 'retired']]> |
| </doc> |
| </method> |
| <method name="readJobStatus" return="org.apache.hadoop.mapred.JobStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <doc> |
| <![CDATA[This method retrieves JobStatus information from DFS stored using |
| store method. |
| |
| @param jobId the jobId for which jobStatus is queried |
| @return JobStatus object, null if not able to retrieve]]> |
| </doc> |
| </method> |
| <method name="readJobProfile" return="org.apache.hadoop.mapred.JobProfile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <doc> |
| <![CDATA[This method retrieves JobProfile information from DFS stored using |
| store method. |
| |
| @param jobId the jobId for which jobProfile is queried |
| @return JobProfile object, null if not able to retrieve]]> |
| </doc> |
| </method> |
| <method name="readCounters" return="org.apache.hadoop.mapred.Counters" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <doc> |
| <![CDATA[This method retrieves Counters information from DFS stored using |
| store method. |
| |
| @param jobId the jobId for which Counters is queried |
| @return Counters object, null if not able to retrieve]]> |
| </doc> |
| </method> |
| <method name="readJobTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="fromEventId" type="int"/> |
| <param name="maxEvents" type="int"/> |
| <doc> |
| <![CDATA[This method retrieves TaskCompletionEvents information from DFS stored |
| using store method. |
| |
| @param jobId the jobId for which TaskCompletionEvents is queried |
| @param fromEventId events offset |
| @param maxEvents max number of events |
| @return TaskCompletionEvent[], empty array if not able to retrieve]]> |
| </doc> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Persists and retrieves the Job info of a job into/from DFS. |
| <p/> |
| If the retain time is zero jobs are not persisted. |
| <p/> |
| A daemon thread cleans up job info files older than the retain time |
| <p/> |
| The retain time can be set with the 'persist.jobstatus.hours' |
| configuration variable (it is in hours).]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.CompletedJobStatusStore --> |
| <!-- start class org.apache.hadoop.mapred.Counters --> |
| <class name="Counters" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <implements name="java.lang.Iterable<org.apache.hadoop.mapred.Counters.Group>"/> |
| <constructor name="Counters" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getGroupNames" return="java.util.Collection<java.lang.String>" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the names of all counter classes. |
| @return Set of counter names.]]> |
| </doc> |
| </method> |
| <method name="iterator" return="java.util.Iterator<org.apache.hadoop.mapred.Counters.Group>" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getGroup" return="org.apache.hadoop.mapred.Counters.Group" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="groupName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Returns the named counter group, or an empty group if there is none |
| with the specified name.]]> |
| </doc> |
| </method> |
| <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Enum"/> |
| <doc> |
| <![CDATA[Find the counter for the given enum. The same enum will always return the |
| same counter. |
| @param key the counter key |
| @return the matching counter object]]> |
| </doc> |
| </method> |
| <method name="findCounter" return="org.apache.hadoop.mapred.Counters.Counter" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="group" type="java.lang.String"/> |
| <param name="id" type="int"/> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Find a counter by using strings |
| @param group the name of the group |
| @param id the id of the counter within the group (0 to N-1) |
| @param name the internal name of the counter |
| @return the counter for that name]]> |
| </doc> |
| </method> |
| <method name="incrCounter" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Enum"/> |
| <param name="amount" type="long"/> |
| <doc> |
| <![CDATA[Increments the specified counter by the specified amount, creating it if |
| it didn't already exist. |
| @param key identifies a counter |
| @param amount amount by which counter is to be incremented]]> |
| </doc> |
| </method> |
| <method name="getCounter" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Enum"/> |
| <doc> |
| <![CDATA[Returns current value of the specified counter, or 0 if the counter |
| does not exist.]]> |
| </doc> |
| </method> |
| <method name="incrAllCounters" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="org.apache.hadoop.mapred.Counters"/> |
| <doc> |
| <![CDATA[Increments multiple counters by their amounts in another Counters |
| instance. |
| @param other the other Counters instance]]> |
| </doc> |
| </method> |
| <method name="sum" return="org.apache.hadoop.mapred.Counters" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="a" type="org.apache.hadoop.mapred.Counters"/> |
| <param name="b" type="org.apache.hadoop.mapred.Counters"/> |
| <doc> |
| <![CDATA[Convenience method for computing the sum of two sets of counters.]]> |
| </doc> |
| </method> |
| <method name="size" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the total number of counters, by summing the number of counters |
| in each group.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write the set of groups. |
| The external format is: |
| #groups (groupName group)* |
| |
| i.e. the number of groups followed by 0 or more groups, where each |
| group is of the form: |
| |
| groupDisplayName #counters (false | true counter)* |
| |
| where each counter is of the form: |
| |
| name value]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a set of groups.]]> |
| </doc> |
| </method> |
| <method name="log" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="log" type="org.apache.commons.logging.Log"/> |
| <doc> |
| <![CDATA[Logs the current counter values. |
| @param log The log to use.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return textual representation of the counter values.]]> |
| </doc> |
| </method> |
| <method name="makeCompactString" return="java.lang.String" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Convert a counters object into a single line that is easy to parse. |
| @return the string with "name=value" for each counter and separated by ","]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A set of named counters. |
| |
| <p><code>Counters</code> represent global counters, defined either by the |
| Map-Reduce framework or applications. Each <code>Counter</code> can be of |
| any {@link Enum} type.</p> |
| |
| <p><code>Counters</code> are bunched into {@link Group}s, each comprising of |
| counters from a particular <code>Enum</code> class.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.Counters --> |
| <!-- start class org.apache.hadoop.mapred.Counters.Counter --> |
| <class name="Counters.Counter" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read the binary representation of the counter]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write the binary representation of the counter]]> |
| </doc> |
| </method> |
| <method name="getDisplayName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the name of the counter. |
| @return the user facing name of the counter]]> |
| </doc> |
| </method> |
| <method name="getCounter" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[What is the current value of this counter? |
| @return the current value]]> |
| </doc> |
| </method> |
| <method name="increment" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="incr" type="long"/> |
| <doc> |
| <![CDATA[Increment this counter by the given value |
| @param incr the value to increase this counter by]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A counter record, comprising its name and value.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.Counters.Counter --> |
| <!-- start class org.apache.hadoop.mapred.Counters.Group --> |
| <class name="Counters.Group" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <implements name="java.lang.Iterable<org.apache.hadoop.mapred.Counters.Counter>"/> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns raw name of the group. This is the name of the enum class |
| for this group of counters.]]> |
| </doc> |
| </method> |
| <method name="getDisplayName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns localized name of the group. This is the same as getName() by |
| default, but different if an appropriate ResourceBundle is found.]]> |
| </doc> |
| </method> |
| <method name="getCounter" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="counterName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Returns the value of the specified counter, or 0 if the counter does |
| not exist.]]> |
| </doc> |
| </method> |
| <method name="getCounter" return="org.apache.hadoop.mapred.Counters.Counter" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="id" type="int"/> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get the counter for the given id and create it if it doesn't exist. |
| @param id the numeric id of the counter within the group |
| @param name the internal counter name |
| @return the counter]]> |
| </doc> |
| </method> |
| <method name="size" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the number of counters in this group.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="iterator" return="java.util.Iterator<org.apache.hadoop.mapred.Counters.Counter>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[<code>Group</code> of counters, comprising of counters from a particular |
| counter {@link Enum} class. |
| |
 <p><code>Group</code> handles localization of the class name and the
| counter names.</p>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.Counters.Group --> |
| <!-- start class org.apache.hadoop.mapred.DefaultJobHistoryParser --> |
| <class name="DefaultJobHistoryParser" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DefaultJobHistoryParser" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="parseJobTasks" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobHistoryFile" type="java.lang.String"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobHistory.JobInfo"/> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Populates a JobInfo object from the job's history log file. |
| @param jobHistoryFile history file for this job. |
| @param job a precreated JobInfo object, should be non-null. |
| @param fs FileSystem where historyFile is present. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Default parser for job history files. It creates object model from |
| job history file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.DefaultJobHistoryParser --> |
| <!-- start class org.apache.hadoop.mapred.FileAlreadyExistsException --> |
| <class name="FileAlreadyExistsException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="FileAlreadyExistsException" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="FileAlreadyExistsException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[Used when target file already exists for any operation and |
| is not configured to be overwritten.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.FileAlreadyExistsException --> |
| <!-- start class org.apache.hadoop.mapred.FileInputFormat --> |
| <class name="FileInputFormat" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.InputFormat<K, V>"/> |
| <constructor name="FileInputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setMinSplitSize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="minSplitSize" type="long"/> |
| </method> |
| <method name="isSplitable" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="filename" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Is the given filename splitable? Usually, true, but if the file is |
| stream compressed, it will not be. |
| |
| <code>FileInputFormat</code> implementations can override this and return |
| <code>false</code> to ensure that individual input files are never split-up |
| so that {@link Mapper}s process entire files. |
| |
| @param fs the file system that the file is on |
| @param filename the file name to check |
| @return is this file splitable?]]> |
| </doc> |
| </method> |
| <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader<K, V>" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="split" type="org.apache.hadoop.mapred.InputSplit"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="setInputPathFilter" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="filter" type="java.lang.Class<? extends org.apache.hadoop.fs.PathFilter>"/> |
| <doc> |
| <![CDATA[Set a PathFilter to be applied to the input paths for the map-reduce job. |
| |
| @param filter the PathFilter class use for filtering the input paths.]]> |
| </doc> |
| </method> |
| <method name="getInputPathFilter" return="org.apache.hadoop.fs.PathFilter" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Get a PathFilter instance of the filter set for the input paths. |
| |
| @return the PathFilter instance set for the job, NULL if none has been set.]]> |
| </doc> |
| </method> |
| <method name="listPaths" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[List input directories. |
| Subclasses may override to, e.g., select only files matching a regular |
| expression. |
| |
| @param job the job to list input paths for |
| @return array of Path objects |
| @throws IOException if zero items.]]> |
| </doc> |
| </method> |
| <method name="validateInput" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="numSplits" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Splits files returned by {@link #listPaths(JobConf)} when |
| they're too big.]]> |
| </doc> |
| </method> |
| <method name="computeSplitSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="goalSize" type="long"/> |
| <param name="minSize" type="long"/> |
| <param name="blockSize" type="long"/> |
| </method> |
| <method name="getBlockIndex" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="blkLocations" type="org.apache.hadoop.fs.BlockLocation[]"/> |
| <param name="offset" type="long"/> |
| </method> |
| <method name="setInputPaths" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="commaSeparatedPaths" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Sets the given comma separated paths as the list of inputs |
| for the map-reduce job. |
| |
| @param conf Configuration of the job |
| @param commaSeparatedPaths Comma separated paths to be set as |
| the list of inputs for the map-reduce job.]]> |
| </doc> |
| </method> |
| <method name="addInputPaths" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="commaSeparatedPaths" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Add the given comma separated paths to the list of inputs for |
| the map-reduce job. |
| |
| @param conf The configuration of the job |
| @param commaSeparatedPaths Comma separated paths to be added to |
| the list of inputs for the map-reduce job.]]> |
| </doc> |
| </method> |
| <method name="setInputPaths" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="inputPaths" type="org.apache.hadoop.fs.Path[]"/> |
| <doc> |
| <![CDATA[Set the array of {@link Path}s as the list of inputs |
| for the map-reduce job. |
| |
| @param conf Configuration of the job. |
| @param inputPaths the {@link Path}s of the input directories/files |
| for the map-reduce job.]]> |
| </doc> |
| </method> |
| <method name="addInputPath" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job. |
| |
| @param conf The configuration of the job |
| @param path {@link Path} to be added to the list of inputs for |
| the map-reduce job.]]> |
| </doc> |
| </method> |
| <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Get the list of input {@link Path}s for the map-reduce job. |
| |
| @param conf The configuration of the job |
| @return the list of input {@link Path}s for the map-reduce job.]]> |
| </doc> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[A base class for file-based {@link InputFormat}. |
| |
| <p><code>FileInputFormat</code> is the base class for all file-based |
| <code>InputFormat</code>s. This provides generic implementations of |
| {@link #validateInput(JobConf)} and {@link #getSplits(JobConf, int)}. |
| Implementations of <code>FileInputFormat</code> can also override the |
| {@link #isSplitable(FileSystem, Path)} method to ensure input-files are |
| not split-up and are processed as a whole by {@link Mapper}s.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.FileInputFormat --> |
| <!-- start class org.apache.hadoop.mapred.FileOutputFormat --> |
| <class name="FileOutputFormat" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.OutputFormat<K, V>"/> |
| <constructor name="FileOutputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setCompressOutput" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="compress" type="boolean"/> |
| <doc> |
| <![CDATA[Set whether the output of the job is compressed. |
| @param conf the {@link JobConf} to modify |
| @param compress should the output of the job be compressed?]]> |
| </doc> |
| </method> |
| <method name="getCompressOutput" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Is the job output compressed? |
| @param conf the {@link JobConf} to look in |
| @return <code>true</code> if the job output should be compressed, |
| <code>false</code> otherwise]]> |
| </doc> |
| </method> |
| <method name="setOutputCompressorClass" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="codecClass" type="java.lang.Class<? extends org.apache.hadoop.io.compress.CompressionCodec>"/> |
| <doc> |
| <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs. |
| @param conf the {@link JobConf} to modify |
| @param codecClass the {@link CompressionCodec} to be used to |
| compress the job outputs]]> |
| </doc> |
| </method> |
| <method name="getOutputCompressorClass" return="java.lang.Class<? extends org.apache.hadoop.io.compress.CompressionCodec>" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="defaultValue" type="java.lang.Class<? extends org.apache.hadoop.io.compress.CompressionCodec>"/> |
| <doc> |
| <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs. |
| @param conf the {@link JobConf} to look in |
| @param defaultValue the {@link CompressionCodec} to return if not set |
| @return the {@link CompressionCodec} to be used to compress the |
| job outputs |
| @throws IllegalArgumentException if the class was specified, but not found]]> |
| </doc> |
| </method> |
| <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter<K, V>" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="name" type="java.lang.String"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="checkOutputSpecs" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/> |
| <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="setOutputPath" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="outputDir" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Set the {@link Path} of the output directory for the map-reduce job. |
| |
| @param conf The configuration of the job. |
| @param outputDir the {@link Path} of the output directory for |
| the map-reduce job.]]> |
| </doc> |
| </method> |
| <method name="getOutputPath" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Get the {@link Path} to the output directory for the map-reduce job. |
| |
| @return the {@link Path} to the output directory for the map-reduce job. |
| @see FileOutputFormat#getWorkOutputPath(JobConf)]]> |
| </doc> |
| </method> |
| <method name="getWorkOutputPath" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Get the {@link Path} to the task's temporary output directory |
| for the map-reduce job |
| |
| <h4 id="SideEffectFiles">Tasks' Side-Effect Files</h4> |
| |
| <p>Some applications need to create/write-to side-files, which differ from |
| the actual job-outputs. |
| |
| <p>In such cases there could be issues with 2 instances of the same TIP |
| (running simultaneously e.g. speculative tasks) trying to open/write-to the |
| same file (path) on HDFS. Hence the application-writer will have to pick |
| unique names per task-attempt (e.g. using the taskid, say |
| <tt>task_200709221812_0001_m_000000_0</tt>), not just per TIP.</p> |
| |
| <p>To get around this the Map-Reduce framework helps the application-writer |
| out by maintaining a special |
| <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> |
| sub-directory for each task-attempt on HDFS where the output of the |
| task-attempt goes. On successful completion of the task-attempt the files |
| in the <tt>${mapred.output.dir}/_temporary/_${taskid}</tt> (only) |
| are <i>promoted</i> to <tt>${mapred.output.dir}</tt>. Of course, the |
| framework discards the sub-directory of unsuccessful task-attempts. This |
| is completely transparent to the application.</p> |
| |
| <p>The application-writer can take advantage of this by creating any |
| side-files required in <tt>${mapred.work.output.dir}</tt> during execution |
| of his reduce-task i.e. via {@link #getWorkOutputPath(JobConf)}, and the |
| framework will move them out similarly - thus she doesn't have to pick |
| unique paths per task-attempt.</p> |
| |
| <p><i>Note</i>: the value of <tt>${mapred.work.output.dir}</tt> during |
| execution of a particular task-attempt is actually |
| <tt>${mapred.output.dir}/_temporary/_${taskid}</tt>, and this value is |
| set by the map-reduce framework. So, just create any side-files in the |
| path returned by {@link #getWorkOutputPath(JobConf)} from map/reduce |
| task to take advantage of this feature.</p> |
| |
| <p>The entire discussion holds true for maps of jobs with |
| reducer=NONE (i.e. 0 reduces) since output of the map, in that case, |
| goes directly to HDFS.</p> |
| |
| @return the {@link Path} to the task's temporary output directory |
| for the map-reduce job.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A base class for {@link OutputFormat}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.FileOutputFormat --> |
| <!-- start class org.apache.hadoop.mapred.FileSplit --> |
| <class name="FileSplit" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.InputSplit"/> |
| <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, org.apache.hadoop.mapred.JobConf" |
| static="false" final="false" visibility="public" |
| deprecated="deprecated, no comment"> |
| <doc> |
| <![CDATA[Constructs a split. |
| @deprecated |
| @param file the file name |
| @param start the position of the first byte in the file to process |
| @param length the number of bytes in the file to process]]> |
| </doc> |
| </constructor> |
| <constructor name="FileSplit" type="org.apache.hadoop.fs.Path, long, long, java.lang.String[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructs a split with host information |
| |
| @param file the file name |
| @param start the position of the first byte in the file to process |
| @param length the number of bytes in the file to process |
| @param hosts the list of hosts containing the block, possibly null]]> |
| </doc> |
| </constructor> |
| <method name="getPath" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The file containing this split's data.]]> |
| </doc> |
| </method> |
| <method name="getStart" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The position of the first byte in the file to process.]]> |
| </doc> |
| </method> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The number of bytes in the file to process.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getLocations" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A section of an input file. Returned by {@link |
| InputFormat#getSplits(JobConf, int)} and passed to |
| {@link InputFormat#getRecordReader(InputSplit,JobConf,Reporter)}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.FileSplit --> |
| <!-- start interface org.apache.hadoop.mapred.InputFormat --> |
| <interface name="InputFormat" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="validateInput" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Check for validity of the input-specification for the job. |
| |
| <p>This method is used to validate the input directories when a job is |
| submitted so that the {@link JobClient} can fail early, with a useful |
| error message, in case of errors. For e.g. input directory does not exist. |
| </p> |
| |
| @param job job configuration. |
| @throws InvalidInputException if the job does not have valid input]]> |
| </doc> |
| </method> |
| <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="numSplits" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Logically split the set of input files for the job. |
| |
| <p>Each {@link InputSplit} is then assigned to an individual {@link Mapper} |
| for processing.</p> |
| |
| <p><i>Note</i>: The split is a <i>logical</i> split of the inputs and the |
| input files are not physically split into chunks. For e.g. a split could |
| be <i><input-file-path, start, offset></i> tuple. |
| |
| @param job job configuration. |
| @param numSplits the desired number of splits, a hint. |
| @return an array of {@link InputSplit}s for the job.]]> |
| </doc> |
| </method> |
| <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader<K, V>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="split" type="org.apache.hadoop.mapred.InputSplit"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the {@link RecordReader} for the given {@link InputSplit}. |
| |
| <p>It is the responsibility of the <code>RecordReader</code> to respect |
| record boundaries while processing the logical split to present a |
| record-oriented view to the individual task.</p> |
| |
| @param split the {@link InputSplit} |
| @param job the job that this split belongs to |
| @return a {@link RecordReader}]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<code>InputFormat</code> describes the input-specification for a |
| Map-Reduce job. |
| |
| <p>The Map-Reduce framework relies on the <code>InputFormat</code> of the |
| job to:</p> |
| <ol> |
| <li> |
| Validate the input-specification of the job. |
| </li> |
| <li> |
| Split-up the input file(s) into logical {@link InputSplit}s, each of |
| which is then assigned to an individual {@link Mapper}. |
| </li> |
| <li> |
| Provide the {@link RecordReader} implementation to be used to glean |
| input records from the logical <code>InputSplit</code> for processing by |
| the {@link Mapper}. |
| </li> |
| </ol> |
| |
| <p>The default behavior of file-based {@link InputFormat}s, typically |
| sub-classes of {@link FileInputFormat}, is to split the |
| input into <i>logical</i> {@link InputSplit}s based on the total size, in |
| bytes, of the input files. However, the {@link FileSystem} blocksize of |
| the input files is treated as an upper bound for input splits. A lower bound |
| on the split size can be set via |
| <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size"> |
| mapred.min.split.size</a>.</p> |
| |
| <p>Clearly, logical splits based on input-size is insufficient for many |
| applications since record boundaries are to be respected. In such cases, the |
| application has to also implement a {@link RecordReader} on whom lies the |
| responsibility to respect record-boundaries and present a record-oriented |
| view of the logical <code>InputSplit</code> to the individual task. |
| |
| @see InputSplit |
| @see RecordReader |
| @see JobClient |
| @see FileInputFormat]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.InputFormat --> |
| <!-- start interface org.apache.hadoop.mapred.InputSplit --> |
| <interface name="InputSplit" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the total number of bytes in the data of the <code>InputSplit</code>. |
| |
| @return the number of bytes in the input split. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getLocations" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the list of hostnames where the input split is located. |
| |
| @return list of hostnames where data of the <code>InputSplit</code> is |
| located as an array of <code>String</code>s. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<code>InputSplit</code> represents the data to be processed by an |
| individual {@link Mapper}. |
| |
| <p>Typically, it presents a byte-oriented view on the input and is the |
| responsibility of {@link RecordReader} of the job to process this and present |
| a record-oriented view. |
| |
| @see InputFormat |
| @see RecordReader]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.InputSplit --> |
| <!-- start class org.apache.hadoop.mapred.InvalidFileTypeException --> |
| <class name="InvalidFileTypeException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="InvalidFileTypeException" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="InvalidFileTypeException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[Used when the file type differs from the desired file type, such as |
| getting a file when a directory is expected, or a wrong file type.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.InvalidFileTypeException --> |
| <!-- start class org.apache.hadoop.mapred.InvalidInputException --> |
| <class name="InvalidInputException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="InvalidInputException" type="java.util.List<java.io.IOException>" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create the exception with the given list. |
| @param probs the list of problems to report. this list is not copied.]]> |
| </doc> |
| </constructor> |
| <method name="getProblems" return="java.util.List<java.io.IOException>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the complete list of the problems reported. |
| @return the list of problems, which must not be modified]]> |
| </doc> |
| </method> |
| <method name="getMessage" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get a summary message of the problems found. |
| @return the concatenated messages from all of the problems.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class wraps a list of problems with the input, so that the user |
| can get a list of problems together instead of finding and fixing them one |
| by one.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.InvalidInputException --> |
| <!-- start class org.apache.hadoop.mapred.InvalidJobConfException --> |
| <class name="InvalidJobConfException" extends="java.io.IOException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="InvalidJobConfException" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="InvalidJobConfException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[This exception is thrown when jobconf misses some mandatory attributes |
| or value of some attributes is invalid.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.InvalidJobConfException --> |
| <!-- start class org.apache.hadoop.mapred.IsolationRunner --> |
| <class name="IsolationRunner" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="IsolationRunner" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Run a single task |
| @param args the first argument is the task directory]]> |
| </doc> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.IsolationRunner --> |
| <!-- start class org.apache.hadoop.mapred.JobClient --> |
| <class name="JobClient" extends="org.apache.hadoop.conf.Configured" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.MRConstants"/> |
| <implements name="org.apache.hadoop.util.Tool"/> |
| <constructor name="JobClient" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a job client.]]> |
| </doc> |
| </constructor> |
| <constructor name="JobClient" type="org.apache.hadoop.mapred.JobConf" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Build a job client with the given {@link JobConf}, and connect to the |
| default {@link JobTracker}. |
| |
| @param conf the job configuration. |
| @throws IOException]]> |
| </doc> |
| </constructor> |
| <constructor name="JobClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Build a job client, connect to the indicated job tracker. |
| |
| @param jobTrackAddr the job tracker to connect to. |
| @param conf configuration.]]> |
| </doc> |
| </constructor> |
| <method name="getCommandLineConfig" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[return the command line configuration]]> |
| </doc> |
| </method> |
| <method name="init" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Connect to the default {@link JobTracker}. |
| @param conf the job configuration. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Close the <code>JobClient</code>.]]> |
| </doc> |
| </method> |
| <method name="getFs" return="org.apache.hadoop.fs.FileSystem" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get a filesystem handle. We need this to prepare jobs |
| for submission to the MapReduce system. |
| |
| @return the filesystem handle.]]> |
| </doc> |
| </method> |
| <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobFile" type="java.lang.String"/> |
| <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/> |
| <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Submit a job to the MR system. |
| |
| This returns a handle to the {@link RunningJob} which can be used to track |
| the running-job. |
| |
| @param jobFile the job configuration. |
| @return a handle to the {@link RunningJob} which can be used to track the |
| running-job. |
| @throws FileNotFoundException |
| @throws InvalidJobConfException |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="FileNotFoundException" type="java.io.FileNotFoundException"/> |
| <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Submit a job to the MR system. |
| This returns a handle to the {@link RunningJob} which can be used to track |
| the running-job. |
| |
| @param job the job configuration. |
| @return a handle to the {@link RunningJob} which can be used to track the |
| running-job. |
| @throws FileNotFoundException |
| @throws InvalidJobConfException |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getJob" return="org.apache.hadoop.mapred.RunningJob" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
<![CDATA[Get a {@link RunningJob} object to track an ongoing job. Returns
| null if the id does not correspond to any known job. |
| |
| @param jobid the jobid of the job. |
| @return the {@link RunningJob} handle to track the job, null if the |
| <code>jobid</code> doesn't correspond to any known job. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the information of the current state of the map tasks of a job. |
| |
| @param jobId the job to query. |
| @return the list of all of the map tips. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the information of the current state of the reduce tasks of a job. |
| |
| @param jobId the job to query. |
| @return the list of all of the reduce tips. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get status information about the Map-Reduce cluster. |
| |
| @return the status information about the Map-Reduce cluster as an object |
| of {@link ClusterStatus}. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the jobs that are not completed and not failed. |
| |
| @return array of {@link JobStatus} for the running/to-be-run jobs. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the jobs that are submitted. |
| |
| @return array of {@link JobStatus} for the submitted jobs. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="runJob" return="org.apache.hadoop.mapred.RunningJob" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Utility that submits a job, then polls for progress until the job is |
| complete. |
| |
| @param job the job configuration. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="setTaskOutputFilter" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/> |
| <doc> |
| <![CDATA[Sets the output filter for tasks. only those tasks are printed whose |
| output matches the filter. |
| @param newValue task filter.]]> |
| </doc> |
| </method> |
| <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Get the task output filter out of the JobConf. |
| |
| @param job the JobConf to examine. |
| @return the filter level.]]> |
| </doc> |
| </method> |
| <method name="setTaskOutputFilter" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="newValue" type="org.apache.hadoop.mapred.JobClient.TaskStatusFilter"/> |
| <doc> |
| <![CDATA[Modify the JobConf to set the task output filter. |
| |
| @param job the JobConf to modify. |
| @param newValue the value to set.]]> |
| </doc> |
| </method> |
| <method name="getTaskOutputFilter" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns task output filter. |
| @return task filter.]]> |
| </doc> |
| </method> |
| <method name="run" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| </method> |
| <doc> |
| <![CDATA[<code>JobClient</code> is the primary interface for the user-job to interact |
| with the {@link JobTracker}. |
| |
| <code>JobClient</code> provides facilities to submit jobs, track their |
| progress, access component-tasks' reports/logs, get the Map-Reduce cluster |
| status information etc. |
| |
| <p>The job submission process involves: |
| <ol> |
| <li> |
| Checking the input and output specifications of the job. |
| </li> |
| <li> |
| Computing the {@link InputSplit}s for the job. |
| </li> |
| <li> |
| Setup the requisite accounting information for the {@link DistributedCache} |
| of the job, if necessary. |
| </li> |
| <li> |
| Copying the job's jar and configuration to the map-reduce system directory |
| on the distributed file-system. |
| </li> |
| <li> |
| Submitting the job to the <code>JobTracker</code> and optionally monitoring |
its status.
| </li> |
| </ol></p> |
| |
| Normally the user creates the application, describes various facets of the |
| job via {@link JobConf} and then uses the <code>JobClient</code> to submit |
| the job and monitor its progress. |
| |
| <p>Here is an example on how to use <code>JobClient</code>:</p> |
| <p><blockquote><pre> |
| // Create a new JobConf |
| JobConf job = new JobConf(new Configuration(), MyJob.class); |
| |
| // Specify various job-specific parameters |
| job.setJobName("myjob"); |
| |
| job.setInputPath(new Path("in")); |
| job.setOutputPath(new Path("out")); |
| |
| job.setMapperClass(MyJob.MyMapper.class); |
| job.setReducerClass(MyJob.MyReducer.class); |
| |
| // Submit the job, then poll for progress until the job is complete |
| JobClient.runJob(job); |
| </pre></blockquote></p> |
| |
| <h4 id="JobControl">Job Control</h4> |
| |
| <p>At times clients would chain map-reduce jobs to accomplish complex tasks |
| which cannot be done via a single map-reduce job. This is fairly easy since |
| the output of the job, typically, goes to distributed file-system and that |
| can be used as the input for the next job.</p> |
| |
| <p>However, this also means that the onus on ensuring jobs are complete |
| (success/failure) lies squarely on the clients. In such situations the |
| various job-control options are: |
| <ol> |
| <li> |
| {@link #runJob(JobConf)} : submits the job and returns only after |
| the job has completed. |
| </li> |
| <li> |
| {@link #submitJob(JobConf)} : only submits the job, then poll the |
| returned handle to the {@link RunningJob} to query status and make |
| scheduling decisions. |
| </li> |
| <li> |
| {@link JobConf#setJobEndNotificationURI(String)} : setup a notification |
| on job-completion, thus avoiding polling. |
| </li> |
| </ol></p> |
| |
| @see JobConf |
| @see ClusterStatus |
| @see Tool |
| @see DistributedCache]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobClient --> |
| <!-- start class org.apache.hadoop.mapred.JobClient.TaskStatusFilter --> |
| <class name="JobClient.TaskStatusFilter" extends="java.lang.Enum<org.apache.hadoop.mapred.JobClient.TaskStatusFilter>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.mapred.JobClient.TaskStatusFilter" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobClient.TaskStatusFilter --> |
| <!-- start class org.apache.hadoop.mapred.JobConf --> |
| <class name="JobConf" extends="org.apache.hadoop.conf.Configuration" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JobConf" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a map/reduce job configuration.]]> |
| </doc> |
| </constructor> |
| <constructor name="JobConf" type="java.lang.Class" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a map/reduce job configuration. |
| |
| @param exampleClass a class whose containing jar is used as the job's jar.]]> |
| </doc> |
| </constructor> |
| <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a map/reduce job configuration. |
| |
| @param conf a Configuration whose settings will be inherited.]]> |
| </doc> |
| </constructor> |
| <constructor name="JobConf" type="org.apache.hadoop.conf.Configuration, java.lang.Class" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a map/reduce job configuration. |
| |
| @param conf a Configuration whose settings will be inherited. |
| @param exampleClass a class whose containing jar is used as the job's jar.]]> |
| </doc> |
| </constructor> |
| <constructor name="JobConf" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a map/reduce configuration. |
| |
| @param config a Configuration-format XML job description file.]]> |
| </doc> |
| </constructor> |
| <constructor name="JobConf" type="org.apache.hadoop.fs.Path" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a map/reduce configuration. |
| |
| @param config a Configuration-format XML job description file.]]> |
| </doc> |
| </constructor> |
| <method name="getJar" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the user jar for the map-reduce job. |
| |
| @return the user jar for the map-reduce job.]]> |
| </doc> |
| </method> |
| <method name="setJar" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jar" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the user jar for the map-reduce job. |
| |
| @param jar the user jar for the map-reduce job.]]> |
| </doc> |
| </method> |
| <method name="setJarByClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="cls" type="java.lang.Class"/> |
| <doc> |
| <![CDATA[Set the job's jar file by finding an example class location. |
| |
| @param cls the example class.]]> |
| </doc> |
| </method> |
| <method name="getSystemDir" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the system directory where job-specific files are to be placed. |
| |
| @return the system directory where job-specific files are to be placed.]]> |
| </doc> |
| </method> |
| <method name="getLocalDirs" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="deleteLocalFiles" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="deleteLocalFiles" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="subdir" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getLocalPath" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pathString" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Constructs a local file name. Files are distributed among configured |
| local directories.]]> |
| </doc> |
| </method> |
| <method name="setInputPath" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use {@link FileInputFormat#setInputPaths(JobConf, Path...)} or |
| {@link FileInputFormat#setInputPaths(JobConf, String)}"> |
| <param name="dir" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Set the {@link Path} of the input directory for the map-reduce job. |
| |
| @param dir the {@link Path} of the input directory for the map-reduce job. |
| @deprecated Use {@link FileInputFormat#setInputPaths(JobConf, Path...)} or |
| {@link FileInputFormat#setInputPaths(JobConf, String)}]]> |
| </doc> |
| </method> |
| <method name="addInputPath" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use {@link FileInputFormat#addInputPath(JobConf, Path)} or |
| {@link FileInputFormat#addInputPaths(JobConf, String)}"> |
| <param name="dir" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Add a {@link Path} to the list of inputs for the map-reduce job. |
| |
| @param dir {@link Path} to be added to the list of inputs for |
| the map-reduce job. |
| @deprecated Use {@link FileInputFormat#addInputPath(JobConf, Path)} or |
| {@link FileInputFormat#addInputPaths(JobConf, String)}]]> |
| </doc> |
| </method> |
| <method name="getInputPaths" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use {@link FileInputFormat#getInputPaths(JobConf)}"> |
| <doc> |
| <![CDATA[Get the list of input {@link Path}s for the map-reduce job. |
| |
| @return the list of input {@link Path}s for the map-reduce job. |
| @deprecated Use {@link FileInputFormat#getInputPaths(JobConf)}]]> |
| </doc> |
| </method> |
| <method name="getUser" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the reported username for this job. |
| |
| @return the username]]> |
| </doc> |
| </method> |
| <method name="setUser" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="user" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the reported username for this job. |
| |
| @param user the username for this job.]]> |
| </doc> |
| </method> |
| <method name="setKeepFailedTaskFiles" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="keep" type="boolean"/> |
| <doc> |
| <![CDATA[Set whether the framework should keep the intermediate files for |
| failed tasks. |
| |
| @param keep <code>true</code> if framework should keep the intermediate files |
| for failed tasks, <code>false</code> otherwise.]]> |
| </doc> |
| </method> |
| <method name="getKeepFailedTaskFiles" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Should the temporary files for failed tasks be kept? |
| |
| @return should the files be kept?]]> |
| </doc> |
| </method> |
| <method name="setKeepTaskFilesPattern" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="pattern" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set a regular expression for task names that should be kept. |
| The regular expression ".*_m_000123_0" would keep the files |
| for the first instance of map 123 that ran. |
| |
| @param pattern the java.util.regex.Pattern to match against the |
| task names.]]> |
| </doc> |
| </method> |
| <method name="getKeepTaskFilesPattern" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the regular expression that is matched against the task names |
| to see if we need to keep the files. |
| |
@return the pattern as a string, if it was set, otherwise null.]]>
| </doc> |
| </method> |
| <method name="setWorkingDirectory" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dir" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[Set the current working directory for the default file system. |
| |
| @param dir the new current working directory.]]> |
| </doc> |
| </method> |
| <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the current working directory for the default file system. |
| |
| @return the directory name.]]> |
| </doc> |
| </method> |
| <method name="getOutputPath" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use {@link FileOutputFormat#getOutputPath(JobConf)} or |
| {@link FileOutputFormat#getWorkOutputPath(JobConf)} |
| Get the {@link Path} to the output directory for the map-reduce job."> |
| <doc> |
| <![CDATA[@deprecated Use {@link FileOutputFormat#getOutputPath(JobConf)} or |
| {@link FileOutputFormat#getWorkOutputPath(JobConf)} |
| Get the {@link Path} to the output directory for the map-reduce job. |
| |
| @return the {@link Path} to the output directory for the map-reduce job.]]> |
| </doc> |
| </method> |
| <method name="setOutputPath" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="Use {@link FileOutputFormat#setOutputPath(JobConf, Path)} |
| Set the {@link Path} of the output directory for the map-reduce job. |
| |
| lEsS_tHaNp>lEsS_tHaNi>NotelEsS_tHaN/i>: |
| lEsS_tHaN/p>"> |
| <param name="dir" type="org.apache.hadoop.fs.Path"/> |
| <doc> |
| <![CDATA[@deprecated Use {@link FileOutputFormat#setOutputPath(JobConf, Path)} |
| Set the {@link Path} of the output directory for the map-reduce job. |
| |
| <p><i>Note</i>: |
| </p> |
| @param dir the {@link Path} of the output directory for the map-reduce job.]]> |
| </doc> |
| </method> |
| <method name="getInputFormat" return="org.apache.hadoop.mapred.InputFormat" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the {@link InputFormat} implementation for the map-reduce job, |
defaults to {@link TextInputFormat} if not specified explicitly.
| |
| @return the {@link InputFormat} implementation for the map-reduce job.]]> |
| </doc> |
| </method> |
| <method name="setInputFormat" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<? extends org.apache.hadoop.mapred.InputFormat>"/> |
| <doc> |
| <![CDATA[Set the {@link InputFormat} implementation for the map-reduce job. |
| |
| @param theClass the {@link InputFormat} implementation for the map-reduce |
| job.]]> |
| </doc> |
| </method> |
| <method name="getOutputFormat" return="org.apache.hadoop.mapred.OutputFormat" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the {@link OutputFormat} implementation for the map-reduce job, |
defaults to {@link TextOutputFormat} if not specified explicitly.
| |
| @return the {@link OutputFormat} implementation for the map-reduce job.]]> |
| </doc> |
| </method> |
| <method name="setOutputFormat" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<? extends org.apache.hadoop.mapred.OutputFormat>"/> |
| <doc> |
| <![CDATA[Set the {@link OutputFormat} implementation for the map-reduce job. |
| |
| @param theClass the {@link OutputFormat} implementation for the map-reduce |
| job.]]> |
| </doc> |
| </method> |
| <method name="setCompressMapOutput" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="compress" type="boolean"/> |
| <doc> |
| <![CDATA[Should the map outputs be compressed before transfer? |
| Uses the SequenceFile compression. |
| |
| @param compress should the map outputs be compressed?]]> |
| </doc> |
| </method> |
| <method name="getCompressMapOutput" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[Are the outputs of the maps compressed?
| |
| @return <code>true</code> if the outputs of the maps are to be compressed, |
| <code>false</code> otherwise.]]> |
| </doc> |
| </method> |
| <method name="setMapOutputCompressionType" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/> |
| <doc> |
| <![CDATA[Set the {@link CompressionType} for the map outputs. |
| |
| @param style the {@link CompressionType} to control how the map outputs |
| are compressed.]]> |
| </doc> |
| </method> |
| <method name="getMapOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the {@link CompressionType} for the map outputs. |
| |
| @return the {@link CompressionType} for map outputs, defaulting to |
| {@link CompressionType#RECORD}.]]> |
| </doc> |
| </method> |
| <method name="setMapOutputCompressorClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="codecClass" type="java.lang.Class<? extends org.apache.hadoop.io.compress.CompressionCodec>"/> |
| <doc> |
| <![CDATA[Set the given class as the {@link CompressionCodec} for the map outputs. |
| |
| @param codecClass the {@link CompressionCodec} class that will compress |
| the map outputs.]]> |
| </doc> |
| </method> |
| <method name="getMapOutputCompressorClass" return="java.lang.Class<? extends org.apache.hadoop.io.compress.CompressionCodec>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="defaultValue" type="java.lang.Class<? extends org.apache.hadoop.io.compress.CompressionCodec>"/> |
| <doc> |
| <![CDATA[Get the {@link CompressionCodec} for compressing the map outputs. |
| |
| @param defaultValue the {@link CompressionCodec} to return if not set |
| @return the {@link CompressionCodec} class that should be used to compress the |
| map outputs. |
| @throws IllegalArgumentException if the class was specified, but not found]]> |
| </doc> |
| </method> |
| <method name="getMapOutputKeyClass" return="java.lang.Class<?>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the key class for the map output data. If it is not set, use the |
| (final) output key class. This allows the map output key class to be |
| different than the final output key class. |
| |
| @return the map output key class.]]> |
| </doc> |
| </method> |
| <method name="setMapOutputKeyClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<?>"/> |
| <doc> |
| <![CDATA[Set the key class for the map output data. This allows the user to |
specify the map output key class to be different than the final output
key class.
| |
| @param theClass the map output key class.]]> |
| </doc> |
| </method> |
| <method name="getMapOutputValueClass" return="java.lang.Class<?>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the value class for the map output data. If it is not set, use the |
(final) output value class. This allows the map output value class to be
| different than the final output value class. |
| |
| @return the map output value class.]]> |
| </doc> |
| </method> |
| <method name="setMapOutputValueClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<?>"/> |
| <doc> |
| <![CDATA[Set the value class for the map output data. This allows the user to |
| specify the map output value class to be different than the final output |
| value class. |
| |
| @param theClass the map output value class.]]> |
| </doc> |
| </method> |
| <method name="getOutputKeyClass" return="java.lang.Class<?>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the key class for the job output data. |
| |
| @return the key class for the job output data.]]> |
| </doc> |
| </method> |
| <method name="setOutputKeyClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<?>"/> |
| <doc> |
| <![CDATA[Set the key class for the job output data. |
| |
| @param theClass the key class for the job output data.]]> |
| </doc> |
| </method> |
| <method name="getOutputKeyComparator" return="org.apache.hadoop.io.RawComparator" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the {@link RawComparator} comparator used to compare keys. |
| |
| @return the {@link RawComparator} comparator used to compare keys.]]> |
| </doc> |
| </method> |
| <method name="setOutputKeyComparatorClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<? extends org.apache.hadoop.io.RawComparator>"/> |
| <doc> |
| <![CDATA[Set the {@link RawComparator} comparator used to compare keys. |
| |
| @param theClass the {@link RawComparator} comparator used to |
| compare keys. |
| @see #setOutputValueGroupingComparator(Class)]]> |
| </doc> |
| </method> |
| <method name="getOutputValueGroupingComparator" return="org.apache.hadoop.io.RawComparator" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the user defined {@link WritableComparable} comparator for |
| grouping keys of inputs to the reduce. |
| |
| @return comparator set by the user for grouping values. |
| @see #setOutputValueGroupingComparator(Class) for details.]]> |
| </doc> |
| </method> |
| <method name="setOutputValueGroupingComparator" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<? extends org.apache.hadoop.io.RawComparator>"/> |
| <doc> |
| <![CDATA[Set the user defined {@link RawComparator} comparator for |
| grouping keys in the input to the reduce. |
| |
| <p>This comparator should be provided if the equivalence rules for keys |
| for sorting the intermediates are different from those for grouping keys |
| before each call to |
| {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p> |
| |
| <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed |
| in a single call to the reduce function if K1 and K2 compare as equal.</p> |
| |
| <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control |
| how keys are sorted, this can be used in conjunction to simulate |
| <i>secondary sort on values</i>.</p> |
| |
| <p><i>Note</i>: This is not a guarantee of the reduce sort being |
| <i>stable</i> in any sense. (In any case, with the order of available |
| map-outputs to the reduce being non-deterministic, it wouldn't make |
| that much sense.)</p> |
| |
| @param theClass the comparator class to be used for grouping keys. |
| It should implement <code>RawComparator</code>. |
| @see #setOutputKeyComparatorClass(Class)]]> |
| </doc> |
| </method> |
| <method name="getOutputValueClass" return="java.lang.Class<?>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the value class for job outputs. |
| |
| @return the value class for job outputs.]]> |
| </doc> |
| </method> |
| <method name="setOutputValueClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<?>"/> |
| <doc> |
| <![CDATA[Set the value class for job outputs. |
| |
| @param theClass the value class for job outputs.]]> |
| </doc> |
| </method> |
| <method name="getMapperClass" return="java.lang.Class<? extends org.apache.hadoop.mapred.Mapper>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the {@link Mapper} class for the job. |
| |
| @return the {@link Mapper} class for the job.]]> |
| </doc> |
| </method> |
| <method name="setMapperClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<? extends org.apache.hadoop.mapred.Mapper>"/> |
| <doc> |
| <![CDATA[Set the {@link Mapper} class for the job. |
| |
| @param theClass the {@link Mapper} class for the job.]]> |
| </doc> |
| </method> |
| <method name="getMapRunnerClass" return="java.lang.Class<? extends org.apache.hadoop.mapred.MapRunnable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the {@link MapRunnable} class for the job. |
| |
| @return the {@link MapRunnable} class for the job.]]> |
| </doc> |
| </method> |
| <method name="setMapRunnerClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<? extends org.apache.hadoop.mapred.MapRunnable>"/> |
| <doc> |
| <![CDATA[Expert: Set the {@link MapRunnable} class for the job. |
| |
| Typically used to exert greater control on {@link Mapper}s. |
| |
| @param theClass the {@link MapRunnable} class for the job.]]> |
| </doc> |
| </method> |
| <method name="getPartitionerClass" return="java.lang.Class<? extends org.apache.hadoop.mapred.Partitioner>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the {@link Partitioner} used to partition {@link Mapper}-outputs |
| to be sent to the {@link Reducer}s. |
| |
| @return the {@link Partitioner} used to partition map-outputs.]]> |
| </doc> |
| </method> |
| <method name="setPartitionerClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<? extends org.apache.hadoop.mapred.Partitioner>"/> |
| <doc> |
| <![CDATA[Set the {@link Partitioner} class used to partition |
| {@link Mapper}-outputs to be sent to the {@link Reducer}s. |
| |
| @param theClass the {@link Partitioner} used to partition map-outputs.]]> |
| </doc> |
| </method> |
| <method name="getReducerClass" return="java.lang.Class<? extends org.apache.hadoop.mapred.Reducer>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the {@link Reducer} class for the job. |
| |
| @return the {@link Reducer} class for the job.]]> |
| </doc> |
| </method> |
| <method name="setReducerClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<? extends org.apache.hadoop.mapred.Reducer>"/> |
| <doc> |
| <![CDATA[Set the {@link Reducer} class for the job. |
| |
| @param theClass the {@link Reducer} class for the job.]]> |
| </doc> |
| </method> |
| <method name="getCombinerClass" return="java.lang.Class<? extends org.apache.hadoop.mapred.Reducer>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the user-defined <i>combiner</i> class used to combine map-outputs |
| before being sent to the reducers. Typically the combiner is the same as |
| the {@link Reducer} for the job i.e. {@link #getReducerClass()}. |
| |
| @return the user-defined combiner class used to combine map-outputs.]]> |
| </doc> |
| </method> |
| <method name="setCombinerClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<? extends org.apache.hadoop.mapred.Reducer>"/> |
| <doc> |
| <![CDATA[Set the user-defined <i>combiner</i> class used to combine map-outputs |
| before being sent to the reducers. |
| |
| <p>The combiner is a task-level aggregation operation which, in some cases, |
| helps to cut down the amount of data transferred from the {@link Mapper} to |
| the {@link Reducer}, leading to better performance.</p> |
| |
| <p>Typically the combiner is the same as the <code>Reducer</code> for the |
| job i.e. {@link #setReducerClass(Class)}.</p> |
| |
| @param theClass the user-defined combiner class used to combine |
| map-outputs.]]> |
| </doc> |
| </method> |
| <method name="getSpeculativeExecution" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Should speculative execution be used for this job? |
| Defaults to <code>true</code>. |
| |
| @return <code>true</code> if speculative execution should be used for this job, |
| <code>false</code> otherwise.]]> |
| </doc> |
| </method> |
| <method name="setSpeculativeExecution" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="speculativeExecution" type="boolean"/> |
| <doc> |
| <![CDATA[Turn speculative execution on or off for this job. |
| |
| @param speculativeExecution <code>true</code> if speculative execution |
| should be turned on, else <code>false</code>.]]> |
| </doc> |
| </method> |
| <method name="getMapSpeculativeExecution" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Should speculative execution be used for this job for map tasks? |
| Defaults to <code>true</code>. |
| |
| @return <code>true</code> if speculative execution should be |
| used for this job for map tasks, |
| <code>false</code> otherwise.]]> |
| </doc> |
| </method> |
| <method name="setMapSpeculativeExecution" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="speculativeExecution" type="boolean"/> |
| <doc> |
| <![CDATA[Turn speculative execution on or off for this job for map tasks. |
| |
| @param speculativeExecution <code>true</code> if speculative execution |
| should be turned on for map tasks, |
| else <code>false</code>.]]> |
| </doc> |
| </method> |
| <method name="getReduceSpeculativeExecution" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Should speculative execution be used for this job for reduce tasks? |
| Defaults to <code>true</code>. |
| |
| @return <code>true</code> if speculative execution should be used |
| for reduce tasks for this job, |
| <code>false</code> otherwise.]]> |
| </doc> |
| </method> |
| <method name="setReduceSpeculativeExecution" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="speculativeExecution" type="boolean"/> |
| <doc> |
| <![CDATA[Turn speculative execution on or off for this job for reduce tasks. |
| |
| @param speculativeExecution <code>true</code> if speculative execution |
| should be turned on for reduce tasks, |
| else <code>false</code>.]]> |
| </doc> |
| </method> |
| <method name="getNumMapTasks" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the configured number of map tasks for this job. |
| Defaults to <code>1</code>. |
| |
| @return the number of map tasks for this job.]]> |
| </doc> |
| </method> |
| <method name="setNumMapTasks" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="n" type="int"/> |
| <doc> |
| <![CDATA[Set the number of map tasks for this job. |
| |
| <p><i>Note</i>: This is only a <i>hint</i> to the framework. The actual |
| number of spawned map tasks depends on the number of {@link InputSplit}s |
| generated by the job's {@link InputFormat#getSplits(JobConf, int)}. |
| |
| A custom {@link InputFormat} is typically used to accurately control |
| the number of map tasks for the job.</p> |
| |
| <h4 id="NoOfMaps">How many maps?</h4> |
| |
| <p>The number of maps is usually driven by the total size of the inputs |
| i.e. total number of blocks of the input files.</p> |
| |
| <p>The right level of parallelism for maps seems to be around 10-100 maps |
| per-node, although it has been set up to 300 or so for very cpu-light map |
| tasks. Task setup takes awhile, so it is best if the maps take at least a |
| minute to execute.</p> |
| |
| <p>The default behavior of file-based {@link InputFormat}s is to split the |
| input into <i>logical</i> {@link InputSplit}s based on the total size, in |
| bytes, of input files. However, the {@link FileSystem} blocksize of the |
| input files is treated as an upper bound for input splits. A lower bound |
| on the split size can be set via |
| <a href="{@docRoot}/../hadoop-default.html#mapred.min.split.size"> |
| mapred.min.split.size</a>.</p> |
| |
| <p>Thus, if you expect 10TB of input data and have a blocksize of 128MB, |
| you'll end up with 82,000 maps, unless {@link #setNumMapTasks(int)} is |
| used to set it even higher.</p> |
| |
| @param n the number of map tasks for this job. |
| @see InputFormat#getSplits(JobConf, int) |
| @see FileInputFormat |
| @see FileSystem#getDefaultBlockSize() |
| @see FileStatus#getBlockSize()]]> |
| </doc> |
| </method> |
| <method name="getNumReduceTasks" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the configured number of reduce tasks for this job. Defaults to |
| <code>1</code>. |
| |
| @return the number of reduce tasks for this job.]]> |
| </doc> |
| </method> |
| <method name="setNumReduceTasks" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="n" type="int"/> |
| <doc> |
| <![CDATA[Set the requisite number of reduce tasks for this job. |
| |
| <h4 id="NoOfReduces">How many reduces?</h4> |
| |
| <p>The right number of reduces seems to be <code>0.95</code> or |
| <code>1.75</code> multiplied by (<<i>no. of nodes</i>> * |
| <a href="{@docRoot}/../hadoop-default.html#mapred.tasktracker.reduce.tasks.maximum"> |
| mapred.tasktracker.reduce.tasks.maximum</a>). |
| </p> |
| |
| <p>With <code>0.95</code> all of the reduces can launch immediately and |
| start transferring map outputs as the maps finish. With <code>1.75</code> |
| the faster nodes will finish their first round of reduces and launch a |
| second wave of reduces doing a much better job of load balancing.</p> |
| |
| <p>Increasing the number of reduces increases the framework overhead, but |
| increases load balancing and lowers the cost of failures.</p> |
| |
| <p>The scaling factors above are slightly less than whole numbers to |
| reserve a few reduce slots in the framework for speculative-tasks, failures |
| etc.</p> |
| |
| <h4 id="ReducerNone">Reducer NONE</h4> |
| |
| <p>It is legal to set the number of reduce-tasks to <code>zero</code>.</p> |
| |
| <p>In this case the output of the map-tasks directly go to distributed |
| file-system, to the path set by |
| {@link FileOutputFormat#setOutputPath(JobConf, Path)}. Also, the |
| framework doesn't sort the map-outputs before writing it out to HDFS.</p> |
| |
| @param n the number of reduce tasks for this job.]]> |
| </doc> |
| </method> |
| <method name="getMaxMapAttempts" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the configured number of maximum attempts that will be made to run a |
| map task, as specified by the <code>mapred.map.max.attempts</code> |
| property. If this property is not already set, the default is 4 attempts. |
| |
| @return the max number of attempts per map task.]]> |
| </doc> |
| </method> |
| <method name="setMaxMapAttempts" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="n" type="int"/> |
| <doc> |
| <![CDATA[Expert: Set the number of maximum attempts that will be made to run a |
| map task. |
| |
| @param n the number of attempts per map task.]]> |
| </doc> |
| </method> |
| <method name="getMaxReduceAttempts" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the configured number of maximum attempts that will be made to run a |
| reduce task, as specified by the <code>mapred.reduce.max.attempts</code> |
| property. If this property is not already set, the default is 4 attempts. |
| |
| @return the max number of attempts per reduce task.]]> |
| </doc> |
| </method> |
| <method name="setMaxReduceAttempts" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="n" type="int"/> |
| <doc> |
| <![CDATA[Expert: Set the number of maximum attempts that will be made to run a |
| reduce task. |
| |
| @param n the number of attempts per reduce task.]]> |
| </doc> |
| </method> |
| <method name="getJobName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the user-specified job name. This is only used to identify the |
| job to the user. |
| |
| @return the job's name, defaulting to "".]]> |
| </doc> |
| </method> |
| <method name="setJobName" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the user-specified job name. |
| |
| @param name the job's new name.]]> |
| </doc> |
| </method> |
| <method name="getSessionId" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the user-specified session identifier. The default is the empty string. |
| |
| The session identifier is used to tag metric data that is reported to some |
| performance metrics system via the org.apache.hadoop.metrics API. The |
| session identifier is intended, in particular, for use by Hadoop-On-Demand |
| (HOD) which allocates a virtual Hadoop cluster dynamically and transiently. |
| HOD will set the session identifier by modifying the hadoop-site.xml file |
| before starting the cluster. |
| |
| When not running under HOD, this identifier is expected to remain set to |
| the empty string. |
| |
| @return the session identifier, defaulting to "".]]> |
| </doc> |
| </method> |
| <method name="setSessionId" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="sessionId" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the user-specified session identifier. |
| |
| @param sessionId the new session id.]]> |
| </doc> |
| </method> |
| <method name="setMaxTaskFailuresPerTracker" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="noFailures" type="int"/> |
| <doc> |
| <![CDATA[Set the maximum no. of failures of a given job per tasktracker. |
| If the no. of task failures exceeds <code>noFailures</code>, the |
| tasktracker is <i>blacklisted</i> for this job. |
| |
| @param noFailures maximum no. of failures of a given job per tasktracker.]]> |
| </doc> |
| </method> |
| <method name="getMaxTaskFailuresPerTracker" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Expert: Get the maximum no. of failures of a given job per tasktracker. |
| If the no. of task failures exceeds this, the tasktracker is |
| <i>blacklisted</i> for this job. |
| |
| @return the maximum no. of failures of a given job per tasktracker.]]> |
| </doc> |
| </method> |
| <method name="getMaxMapTaskFailuresPercent" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the maximum percentage of map tasks that can fail without |
| the job being aborted. |
| |
| Each map task is executed a minimum of {@link #getMaxMapAttempts()} |
| attempts before being declared as <i>failed</i>. |
| |
| Defaults to <code>zero</code>, i.e. <i>any</i> failed map-task results in |
| the job being declared as {@link JobStatus#FAILED}. |
| |
| @return the maximum percentage of map tasks that can fail without |
| the job being aborted.]]> |
| </doc> |
| </method> |
| <method name="setMaxMapTaskFailuresPercent" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="percent" type="int"/> |
| <doc> |
| <![CDATA[Expert: Set the maximum percentage of map tasks that can fail without the |
| job being aborted. |
| |
| Each map task is executed a minimum of {@link #getMaxMapAttempts} attempts |
| before being declared as <i>failed</i>. |
| |
| @param percent the maximum percentage of map tasks that can fail without |
| the job being aborted.]]> |
| </doc> |
| </method> |
| <method name="getMaxReduceTaskFailuresPercent" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the maximum percentage of reduce tasks that can fail without |
| the job being aborted. |
| |
| Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()} |
| attempts before being declared as <i>failed</i>. |
| |
| Defaults to <code>zero</code>, i.e. <i>any</i> failed reduce-task results |
| in the job being declared as {@link JobStatus#FAILED}. |
| |
| @return the maximum percentage of reduce tasks that can fail without |
| the job being aborted.]]> |
| </doc> |
| </method> |
| <method name="setMaxReduceTaskFailuresPercent" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="percent" type="int"/> |
| <doc> |
| <![CDATA[Set the maximum percentage of reduce tasks that can fail without the job |
| being aborted. |
| |
| Each reduce task is executed a minimum of {@link #getMaxReduceAttempts()} |
| attempts before being declared as <i>failed</i>. |
| |
| @param percent the maximum percentage of reduce tasks that can fail without |
| the job being aborted.]]> |
| </doc> |
| </method> |
| <method name="setJobPriority" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="prio" type="org.apache.hadoop.mapred.JobPriority"/> |
| <doc> |
| <![CDATA[Set {@link JobPriority} for this job. |
| |
| @param prio the {@link JobPriority} for this job.]]> |
| </doc> |
| </method> |
| <method name="getJobPriority" return="org.apache.hadoop.mapred.JobPriority" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the {@link JobPriority} for this job. |
| |
| @return the {@link JobPriority} for this job.]]> |
| </doc> |
| </method> |
| <method name="getProfileEnabled" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get whether the task profiling is enabled. |
| @return true if some tasks will be profiled]]> |
| </doc> |
| </method> |
| <method name="setProfileEnabled" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newValue" type="boolean"/> |
| <doc> |
| <![CDATA[Set whether the system should collect profiler information for some of |
| the tasks in this job. The information is stored in the user log |
| directory. |
| @param newValue true means it should be gathered]]> |
| </doc> |
| </method> |
| <method name="getProfileParams" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the profiler configuration arguments. |
| |
| The default value for this property is |
| "-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s" |
| |
| @return the parameters to pass to the task child to configure profiling]]> |
| </doc> |
| </method> |
| <method name="setProfileParams" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="value" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the profiler configuration arguments. If the string contains a '%s' it |
| will be replaced with the name of the profiling output file when the task |
| runs. |
| |
| This value is passed to the task child JVM on the command line. |
| |
| @param value the configuration string]]> |
| </doc> |
| </method> |
| <method name="getProfileTaskRange" return="org.apache.hadoop.conf.Configuration.IntegerRanges" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="isMap" type="boolean"/> |
| <doc> |
| <![CDATA[Get the range of maps or reduces to profile. |
| @param isMap is the task a map? |
| @return the task ranges]]> |
| </doc> |
| </method> |
| <method name="setProfileTaskRange" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="isMap" type="boolean"/> |
| <param name="newValue" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the ranges of maps or reduces to profile. setProfileEnabled(true) |
| must also be called. |
| @param newValue a set of integer ranges of the map ids]]> |
| </doc> |
| </method> |
| <method name="setMapDebugScript" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="mDbgScript" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the debug script to run when the map tasks fail. |
| |
| <p>The debug script can aid debugging of failed map tasks. The script is |
| given task's stdout, stderr, syslog, jobconf files as arguments.</p> |
| |
| <p>The debug command, run on the node where the map failed, is:</p> |
| <p><pre><blockquote> |
| $script $stdout $stderr $syslog $jobconf. |
| </blockquote></pre></p> |
| |
| <p> The script file is distributed through {@link DistributedCache} |
| APIs. The script needs to be symlinked. </p> |
| |
| <p>Here is an example of how to submit a script: |
| <p><blockquote><pre> |
| job.setMapDebugScript("./myscript"); |
| DistributedCache.createSymlink(job); |
| DistributedCache.addCacheFile("/debug/scripts/myscript#myscript"); |
| </pre></blockquote></p> |
| |
| @param mDbgScript the script name]]> |
| </doc> |
| </method> |
| <method name="getMapDebugScript" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the map task's debug script. |
| |
| @return the debug Script for the mapred job for failed map tasks. |
| @see #setMapDebugScript(String)]]> |
| </doc> |
| </method> |
| <method name="setReduceDebugScript" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="rDbgScript" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the debug script to run when the reduce tasks fail. |
| |
| <p>The debug script can aid debugging of failed reduce tasks. The script |
| is given task's stdout, stderr, syslog, jobconf files as arguments.</p> |
| |
| <p>The debug command, run on the node where the reduce failed, is:</p> |
| <p><pre><blockquote> |
| $script $stdout $stderr $syslog $jobconf. |
| </blockquote></pre></p> |
| |
| <p> The script file is distributed through {@link DistributedCache} |
| APIs. The script file needs to be symlinked </p> |
| |
| <p>Here is an example of how to submit a script: |
| <p><blockquote><pre> |
| job.setReduceDebugScript("./myscript"); |
| DistributedCache.createSymlink(job); |
| DistributedCache.addCacheFile("/debug/scripts/myscript#myscript"); |
| </pre></blockquote></p> |
| |
| @param rDbgScript the script name]]> |
| </doc> |
| </method> |
| <method name="getReduceDebugScript" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the reduce task's debug script. |
| |
| @return the debug script for the mapred job for failed reduce tasks. |
| @see #setReduceDebugScript(String)]]> |
| </doc> |
| </method> |
| <method name="getJobEndNotificationURI" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the uri to be invoked in-order to send a notification after the job |
| has completed (success/failure). |
| |
| @return the job end notification uri, <code>null</code> if it hasn't |
| been set. |
| @see #setJobEndNotificationURI(String)]]> |
| </doc> |
| </method> |
| <method name="setJobEndNotificationURI" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="uri" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the uri to be invoked in-order to send a notification after the job |
| has completed (success/failure). |
| |
| <p>The uri can contain 2 special parameters: <tt>$jobId</tt> and |
| <tt>$jobStatus</tt>. Those, if present, are replaced by the job's |
| identifier and completion-status respectively.</p> |
| |
| <p>This is typically used by application-writers to implement chaining of |
| Map-Reduce jobs in an <i>asynchronous manner</i>.</p> |
| |
| @param uri the job end notification uri |
| @see JobStatus |
| @see <a href="{@docRoot}/org/apache/hadoop/mapred/JobClient.html#JobCompletionAndChaining">Job Completion and Chaining</a>]]> |
| </doc> |
| </method> |
| <method name="getJobLocalDir" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get job-specific shared directory for use as scratch space |
| |
| <p> |
| When a job starts, a shared directory is created at location |
| <code> |
| ${mapred.local.dir}/taskTracker/jobcache/$jobid/work/ </code>. |
| This directory is exposed to the users through |
| <code>job.local.dir </code>. |
| So, the tasks can use this space |
| as scratch space and share files among them. </p> |
| This value is also available as a System property. |
| |
| @return The localized job specific shared directory]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A map/reduce job configuration. |
| |
| <p><code>JobConf</code> is the primary interface for a user to describe a |
| map-reduce job to the Hadoop framework for execution. The framework tries to |
| faithfully execute the job as-is described by <code>JobConf</code>, however: |
| <ol> |
| <li> |
| Some configuration parameters might have been marked as |
| <a href="{@docRoot}/org/apache/hadoop/conf/Configuration.html#FinalParams"> |
| final</a> by administrators and hence cannot be altered. |
| </li> |
| <li> |
| While some job parameters are straight-forward to set |
| (e.g. {@link #setNumReduceTasks(int)}), some parameters interact subtly |
| with the rest of the framework and/or job-configuration and are relatively |
| more complex for the user to control finely (e.g. {@link #setNumMapTasks(int)}). |
| </li> |
| </ol></p> |
| |
| <p><code>JobConf</code> typically specifies the {@link Mapper}, combiner |
| (if any), {@link Partitioner}, {@link Reducer}, {@link InputFormat} and |
| {@link OutputFormat} implementations to be used etc. |
| |
| <p>Optionally <code>JobConf</code> is used to specify other advanced facets |
| of the job such as <code>Comparator</code>s to be used, files to be put in |
| the {@link DistributedCache}, whether or not intermediate and/or job outputs |
are to be compressed (and how), debuggability via user-provided scripts
( {@link #setMapDebugScript(String)}/{@link #setReduceDebugScript(String)})
for doing post-processing on task logs, task's stdout, stderr, syslog,
etc.</p>
| |
| <p>Here is an example on how to configure a job via <code>JobConf</code>:</p> |
| <p><blockquote><pre> |
| // Create a new JobConf |
| JobConf job = new JobConf(new Configuration(), MyJob.class); |
| |
| // Specify various job-specific parameters |
| job.setJobName("myjob"); |
| |
| FileInputFormat.setInputPaths(job, new Path("in")); |
| FileOutputFormat.setOutputPath(job, new Path("out")); |
| |
| job.setMapperClass(MyJob.MyMapper.class); |
| job.setCombinerClass(MyJob.MyReducer.class); |
| job.setReducerClass(MyJob.MyReducer.class); |
| |
| job.setInputFormat(SequenceFileInputFormat.class); |
| job.setOutputFormat(SequenceFileOutputFormat.class); |
| </pre></blockquote></p> |
| |
| @see JobClient |
| @see ClusterStatus |
| @see Tool |
| @see DistributedCache]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobConf --> |
| <!-- start interface org.apache.hadoop.mapred.JobConfigurable --> |
| <interface name="JobConfigurable" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Initializes a new instance from a {@link JobConf}. |
| |
| @param job the configuration]]> |
| </doc> |
| </method> |
| <doc> |
<![CDATA[That which may be configured.]]>
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.JobConfigurable --> |
| <!-- start class org.apache.hadoop.mapred.JobEndNotifier --> |
| <class name="JobEndNotifier" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JobEndNotifier" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="startNotifier" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="stopNotifier" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="registerNotification" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="status" type="org.apache.hadoop.mapred.JobStatus"/> |
| </method> |
| <method name="localRunnerNotification" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="status" type="org.apache.hadoop.mapred.JobStatus"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobEndNotifier --> |
| <!-- start class org.apache.hadoop.mapred.JobHistory --> |
| <class name="JobHistory" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JobHistory" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="init" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="hostname" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Initialize JobHistory files. |
| @param conf Jobconf of the job tracker. |
| @param hostname jobtracker's hostname |
@return true if initialized properly
| false otherwise]]> |
| </doc> |
| </method> |
| <method name="parseHistoryFromFS" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="java.lang.String"/> |
| <param name="l" type="org.apache.hadoop.mapred.JobHistory.Listener"/> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Parses history file and invokes Listener.handle() for |
| each line of history. It can be used for looking through history |
| files for specific items without having to keep whole history in memory. |
| @param path path to history file |
| @param l Listener for history events |
| @param fs FileSystem where history file is present |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="isDisableHistory" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[Returns history disable status. By default history is enabled, so this
| method returns false. |
| @return true if history logging is disabled, false otherwise.]]> |
| </doc> |
| </method> |
| <method name="setDisableHistory" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="disableHistory" type="boolean"/> |
| <doc> |
| <![CDATA[Enable/disable history logging. Default value is false, so history |
| is enabled by default. |
| @param disableHistory true if history should be disabled, false otherwise.]]> |
| </doc> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="JOBTRACKER_START_TIME" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Provides methods for writing to and reading from job history. |
| Job History works in an append mode, JobHistory and its inner classes provide methods |
| to log job events. |
| |
| JobHistory is split into multiple files, format of each file is plain text where each line |
| is of the format [type (key=value)*], where type identifies the type of the record. |
| Type maps to UID of one of the inner classes of this class. |
| |
Job history is maintained in a master index which contains start/stop times of all jobs with
a few other job level properties. Apart from this each job's history is maintained in a separate history
file. The name of job history files follows the format jobtrackerId_jobid
| |
| For parsing the job history it supports a listener based interface where each line is parsed |
| and passed to listener. The listener can create an object model of history or look for specific |
| events and discard rest of the history.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobHistory --> |
| <!-- start class org.apache.hadoop.mapred.JobHistory.HistoryCleaner --> |
| <class name="JobHistory.HistoryCleaner" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.lang.Runnable"/> |
| <constructor name="JobHistory.HistoryCleaner" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="run" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Cleans up history data.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Delete history files older than one month. Update master index and remove all |
| jobs older than one month. Also if a job tracker has no jobs in last one month |
| remove reference to the job tracker.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobHistory.HistoryCleaner --> |
| <!-- start class org.apache.hadoop.mapred.JobHistory.JobInfo --> |
| <class name="JobHistory.JobInfo" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JobHistory.JobInfo" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create new JobInfo]]> |
| </doc> |
| </constructor> |
| <method name="getAllTasks" return="java.util.Map<java.lang.String, org.apache.hadoop.mapred.JobHistory.Task>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns all map and reduce tasks <taskid-Task>.]]> |
| </doc> |
| </method> |
| <method name="getLocalJobFilePath" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get the path of the locally stored job file |
| @param jobId id of the job |
| @return the path of the job file on the local file system]]> |
| </doc> |
| </method> |
| <method name="encodeJobHistoryFilePath" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="logFile" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Helper function to encode the URL of the path of the job-history |
| log file. |
| |
| @param logFile path of the job-history file |
| @return URL encoded path |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="encodeJobHistoryFileName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="logFileName" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Helper function to encode the URL of the filename of the job-history |
| log file. |
| |
| @param logFileName file name of the job-history file |
| @return URL encoded filename |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="decodeJobHistoryFileName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="logFileName" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Helper function to decode the URL of the filename of the job-history |
| log file. |
| |
| @param logFileName file name of the job-history file |
| @return URL decoded filename |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="logSubmitted" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="jobConfPath" type="java.lang.String"/> |
| <param name="submitTime" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Log job submitted event to history. Creates a new file in history |
| for the job. if history file creation fails, it disables history |
| for all other events. |
| @param jobId job id assigned by job tracker. |
| @param jobConf job conf of the job |
| @param jobConfPath path to job conf xml file in HDFS. |
| @param submitTime time when job tracker received the job |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="logStarted" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="startTime" type="long"/> |
| <param name="totalMaps" type="int"/> |
| <param name="totalReduces" type="int"/> |
| <doc> |
| <![CDATA[Logs launch time of job. |
| @param jobId job id, assigned by jobtracker. |
| @param startTime start time of job. |
| @param totalMaps total maps assigned by jobtracker. |
| @param totalReduces total reduces.]]> |
| </doc> |
| </method> |
| <method name="logFinished" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="finishTime" type="long"/> |
| <param name="finishedMaps" type="int"/> |
| <param name="finishedReduces" type="int"/> |
| <param name="failedMaps" type="int"/> |
| <param name="failedReduces" type="int"/> |
| <param name="counters" type="org.apache.hadoop.mapred.Counters"/> |
| <doc> |
| <![CDATA[Log job finished. closes the job file in history. |
| @param jobId job id, assigned by jobtracker. |
| @param finishTime finish time of job in ms. |
| @param finishedMaps no of maps successfully finished. |
@param finishedReduces no of reduces finished successfully.
| @param failedMaps no of failed map tasks. |
| @param failedReduces no of failed reduce tasks. |
| @param counters the counters from the job]]> |
| </doc> |
| </method> |
| <method name="logFailed" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| <param name="timestamp" type="long"/> |
| <param name="finishedMaps" type="int"/> |
| <param name="finishedReduces" type="int"/> |
| <doc> |
| <![CDATA[Logs job failed event. Closes the job history log file. |
| @param jobid job id |
| @param timestamp time when job failure was detected in ms. |
@param finishedMaps no of finished map tasks.
| @param finishedReduces no of finished reduce tasks.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Helper class for logging or reading back events related to job start, finish or failure.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobHistory.JobInfo --> |
| <!-- start class org.apache.hadoop.mapred.JobHistory.Keys --> |
| <class name="JobHistory.Keys" extends="java.lang.Enum<org.apache.hadoop.mapred.JobHistory.Keys>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.mapred.JobHistory.Keys[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Keys" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <doc> |
| <![CDATA[Job history files contain key="value" pairs, where keys belong to this enum. |
| It acts as a global namespace for all keys.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobHistory.Keys --> |
| <!-- start interface org.apache.hadoop.mapred.JobHistory.Listener --> |
| <interface name="JobHistory.Listener" abstract="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="handle" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="recType" type="org.apache.hadoop.mapred.JobHistory.RecordTypes"/> |
| <param name="values" type="java.util.Map<org.apache.hadoop.mapred.JobHistory.Keys, java.lang.String>"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Callback method for history parser. |
| @param recType type of record, which is the first entry in the line. |
@param values a map of key-value pairs as they appear in history.
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Callback interface for reading back log events from JobHistory. This interface |
| should be implemented and passed to JobHistory.parseHistory()]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.JobHistory.Listener --> |
| <!-- start class org.apache.hadoop.mapred.JobHistory.MapAttempt --> |
| <class name="JobHistory.MapAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JobHistory.MapAttempt" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="logStarted" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="taskAttemptId" type="java.lang.String"/> |
| <param name="startTime" type="long"/> |
| <param name="hostName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Log start time of this map task attempt. |
| @param jobId job id |
| @param taskId task id |
| @param taskAttemptId task attempt id |
| @param startTime start time of task attempt as reported by task tracker. |
| @param hostName host name of the task attempt.]]> |
| </doc> |
| </method> |
| <method name="logFinished" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="taskAttemptId" type="java.lang.String"/> |
| <param name="finishTime" type="long"/> |
| <param name="hostName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Log finish time of map task attempt. |
| @param jobId job id |
| @param taskId task id |
| @param taskAttemptId task attempt id |
| @param finishTime finish time |
| @param hostName host name]]> |
| </doc> |
| </method> |
| <method name="logFailed" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="taskAttemptId" type="java.lang.String"/> |
| <param name="timestamp" type="long"/> |
| <param name="hostName" type="java.lang.String"/> |
| <param name="error" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Log task attempt failed event. |
| @param jobId jobid |
| @param taskId taskid |
| @param taskAttemptId task attempt id |
| @param timestamp timestamp |
| @param hostName hostname of this task attempt. |
| @param error error message if any for this task attempt.]]> |
| </doc> |
| </method> |
| <method name="logKilled" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="taskAttemptId" type="java.lang.String"/> |
| <param name="timestamp" type="long"/> |
| <param name="hostName" type="java.lang.String"/> |
| <param name="error" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Log task attempt killed event. |
| @param jobId jobid |
| @param taskId taskid |
| @param taskAttemptId task attempt id |
| @param timestamp timestamp |
| @param hostName hostname of this task attempt. |
| @param error error message if any for this task attempt.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Helper class for logging or reading back events related to start, finish or failure of |
| a Map Attempt on a node.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobHistory.MapAttempt --> |
| <!-- start class org.apache.hadoop.mapred.JobHistory.RecordTypes --> |
| <class name="JobHistory.RecordTypes" extends="java.lang.Enum<org.apache.hadoop.mapred.JobHistory.RecordTypes>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.mapred.JobHistory.RecordTypes[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.RecordTypes" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <doc> |
| <![CDATA[Record types are identifiers for each line of log in history files. |
| A record type appears as the first token in a single line of log.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobHistory.RecordTypes --> |
| <!-- start class org.apache.hadoop.mapred.JobHistory.ReduceAttempt --> |
| <class name="JobHistory.ReduceAttempt" extends="org.apache.hadoop.mapred.JobHistory.TaskAttempt" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JobHistory.ReduceAttempt" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="logStarted" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="taskAttemptId" type="java.lang.String"/> |
| <param name="startTime" type="long"/> |
| <param name="hostName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Log start time of Reduce task attempt. |
| @param jobId job id |
| @param taskId task id (tip) |
| @param taskAttemptId task attempt id |
| @param startTime start time |
| @param hostName host name]]> |
| </doc> |
| </method> |
| <method name="logFinished" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="taskAttemptId" type="java.lang.String"/> |
| <param name="shuffleFinished" type="long"/> |
| <param name="sortFinished" type="long"/> |
| <param name="finishTime" type="long"/> |
| <param name="hostName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Log finished event of this task. |
| @param jobId job id |
| @param taskId task id |
| @param taskAttemptId task attempt id |
| @param shuffleFinished shuffle finish time |
| @param sortFinished sort finish time |
| @param finishTime finish time of task |
| @param hostName host name where task attempt executed]]> |
| </doc> |
| </method> |
| <method name="logFailed" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="taskAttemptId" type="java.lang.String"/> |
| <param name="timestamp" type="long"/> |
| <param name="hostName" type="java.lang.String"/> |
| <param name="error" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Log failed reduce task attempt. |
| @param jobId job id |
| @param taskId task id |
| @param taskAttemptId task attempt id |
| @param timestamp time stamp when task failed |
| @param hostName host name of the task attempt. |
| @param error error message of the task.]]> |
| </doc> |
| </method> |
| <method name="logKilled" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="taskAttemptId" type="java.lang.String"/> |
| <param name="timestamp" type="long"/> |
| <param name="hostName" type="java.lang.String"/> |
| <param name="error" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Log killed reduce task attempt. |
| @param jobId job id |
| @param taskId task id |
| @param taskAttemptId task attempt id |
| @param timestamp time stamp when task failed |
| @param hostName host name of the task attempt. |
| @param error error message of the task.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Helper class for logging or reading back events related to start, finish or failure of |
a Reduce Attempt on a node.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobHistory.ReduceAttempt --> |
| <!-- start class org.apache.hadoop.mapred.JobHistory.Task --> |
| <class name="JobHistory.Task" extends="org.apache.hadoop.mapred.JobHistory.KeyValuePair" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JobHistory.Task" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="logStarted" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="taskType" type="java.lang.String"/> |
| <param name="startTime" type="long"/> |
| <doc> |
| <![CDATA[Log start time of task (TIP). |
| @param jobId job id |
| @param taskId task id |
| @param taskType MAP or REDUCE |
| @param startTime startTime of tip.]]> |
| </doc> |
| </method> |
| <method name="logFinished" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="taskType" type="java.lang.String"/> |
| <param name="finishTime" type="long"/> |
| <param name="counters" type="org.apache.hadoop.mapred.Counters"/> |
| <doc> |
| <![CDATA[Log finish time of task. |
| @param jobId job id |
| @param taskId task id |
| @param taskType MAP or REDUCE |
@param finishTime finish time of task in ms]]>
| </doc> |
| </method> |
| <method name="logFailed" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="taskType" type="java.lang.String"/> |
| <param name="time" type="long"/> |
| <param name="error" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Log job failed event. |
| @param jobId jobid |
| @param taskId task id |
| @param taskType MAP or REDUCE. |
@param time timestamp when job failure was detected.
| @param error error message for failure.]]> |
| </doc> |
| </method> |
| <method name="getTaskAttempts" return="java.util.Map<java.lang.String, org.apache.hadoop.mapred.JobHistory.TaskAttempt>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns all task attempts for this task. <task attempt id - TaskAttempt>]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Helper class for logging or reading back events related to Task's start, finish or failure. |
| All events logged by this class are logged in a separate file per job in |
| job tracker history. These events map to TIPs in jobtracker.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobHistory.Task --> |
| <!-- start class org.apache.hadoop.mapred.JobHistory.TaskAttempt --> |
| <class name="JobHistory.TaskAttempt" extends="org.apache.hadoop.mapred.JobHistory.Task" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JobHistory.TaskAttempt" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[Base class for Map and Reduce TaskAttempts.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobHistory.TaskAttempt --> |
| <!-- start class org.apache.hadoop.mapred.JobHistory.Values --> |
| <class name="JobHistory.Values" extends="java.lang.Enum<org.apache.hadoop.mapred.JobHistory.Values>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.mapred.JobHistory.Values[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.mapred.JobHistory.Values" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <doc> |
| <![CDATA[This enum contains some of the values commonly used by history log events. |
| since values in history can only be strings - Values.name() is used in |
| most places in history file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobHistory.Values --> |
| <!-- start class org.apache.hadoop.mapred.JobPriority --> |
| <class name="JobPriority" extends="java.lang.Enum<org.apache.hadoop.mapred.JobPriority>" |
| abstract="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.mapred.JobPriority[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.mapred.JobPriority" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <doc> |
| <![CDATA[Used to describe the priority of the running job.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobPriority --> |
| <!-- start class org.apache.hadoop.mapred.JobProfile --> |
| <class name="JobProfile" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="JobProfile" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct an empty {@link JobProfile}.]]> |
| </doc> |
| </constructor> |
| <constructor name="JobProfile" type="java.lang.String, java.lang.String, java.lang.String, java.lang.String, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a {@link JobProfile} with the userid, jobid, |
| job config-file, job-details url and job name. |
| |
| @param user userid of the person who submitted the job. |
| @param jobid id of the job. |
| @param jobFile job configuration file. |
| @param url link to the web-ui for details of the job. |
| @param name user-specified job name.]]> |
| </doc> |
| </constructor> |
| <method name="getUser" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the user id.]]> |
| </doc> |
| </method> |
| <method name="getJobId" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the job id.]]> |
| </doc> |
| </method> |
| <method name="getJobFile" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the configuration file for the job.]]> |
| </doc> |
| </method> |
| <method name="getURL" return="java.net.URL" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the link to the web-ui for details of the job.]]> |
| </doc> |
| </method> |
| <method name="getJobName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the user-specified job name.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A JobProfile is a MapReduce primitive. Tracks a job, |
| whether living or dead.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobProfile --> |
| <!-- start class org.apache.hadoop.mapred.JobShell --> |
| <class name="JobShell" extends="org.apache.hadoop.conf.Configured" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.util.Tool"/> |
| <constructor name="JobShell" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="JobShell" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="init" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="run" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[run method from Tool]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Provide command line parsing for JobSubmission. |
| Job submission looks like: |
| hadoop jar -libjars <comma separated jars> -archives <comma separated archives> |
| -files <comma separated files> inputjar args]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobShell --> |
| <!-- start class org.apache.hadoop.mapred.JobStatus --> |
| <class name="JobStatus" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="JobStatus" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="JobStatus" type="java.lang.String, float, float, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a job status object for a given jobid. |
| @param jobid The jobid of the job |
| @param mapProgress The progress made on the maps |
| @param reduceProgress The progress made on the reduces |
| @param runState The current state of the job]]> |
| </doc> |
| </constructor> |
| <method name="getJobId" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return The jobid of the Job]]> |
| </doc> |
| </method> |
| <method name="mapProgress" return="float" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return Percentage of progress in maps]]> |
| </doc> |
| </method> |
| <method name="reduceProgress" return="float" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return Percentage of progress in reduce]]> |
| </doc> |
| </method> |
| <method name="getRunState" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return running state of the job]]> |
| </doc> |
| </method> |
| <method name="setRunState" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="state" type="int"/> |
| <doc> |
| <![CDATA[Change the current run state of the job.]]> |
| </doc> |
| </method> |
| <method name="getStartTime" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return start time of the job]]> |
| </doc> |
| </method> |
| <method name="getUsername" return="java.lang.String" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the username of the job]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="RUNNING" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="SUCCEEDED" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="FAILED" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="PREP" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Describes the current status of a job. This is |
| not intended to be a comprehensive piece of data. |
| For that, look at JobProfile.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobStatus --> |
| <!-- start interface org.apache.hadoop.mapred.JobSubmissionProtocol --> |
| <interface name="JobSubmissionProtocol" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.ipc.VersionedProtocol"/> |
| <method name="getNewJobId" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Allocate a name for the job. |
| @return a unique job name for submitting jobs. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobName" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Submit a Job for execution. Returns the latest profile for |
| that job. |
| The job files should be submitted in <b>system-dir</b>/<b>jobName</b>.]]> |
| </doc> |
| </method> |
| <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the current status of the cluster |
| @return summary of the state of the cluster]]> |
| </doc> |
| </method> |
| <method name="killJob" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Kill the indicated job]]> |
| </doc> |
| </method> |
| <method name="killTask" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="shouldFail" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Kill indicated task attempt. |
| @param taskId the id of the task to kill. |
| @param shouldFail if true the task is failed and added to failed tasks list, otherwise |
| it is just killed, w/o affecting job failure status.]]> |
| </doc> |
| </method> |
| <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Grab a handle to a job that is already known to the JobTracker. |
| @return Profile of the job, or null if not found.]]> |
| </doc> |
| </method> |
| <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Grab a handle to a job that is already known to the JobTracker. |
| @return Status of the job, or null if not found.]]> |
| </doc> |
| </method> |
| <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Grab the current job counters]]> |
| </doc> |
| </method> |
| <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Grab a bunch of info on the map tasks that make up the job]]> |
| </doc> |
| </method> |
| <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Grab a bunch of info on the reduce tasks that make up the job]]> |
| </doc> |
| </method> |
| <method name="getFilesystemName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[A MapReduce system always operates on a single filesystem. This |
| function returns the fs name. ('local' if the localfs; 'addr:port' |
| if dfs). The client can then copy files into the right locations |
| prior to submitting the job.]]> |
| </doc> |
| </method> |
| <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the jobs that are not completed and not failed |
| @return array of JobStatus for the running/to-be-run |
| jobs.]]> |
| </doc> |
| </method> |
| <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get all the jobs submitted. |
| @return array of JobStatus for the submitted jobs]]> |
| </doc> |
| </method> |
| <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| <param name="fromEventId" type="int"/> |
| <param name="maxEvents" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get task completion events for the jobid, starting from fromEventId. |
| Returns an empty array if no events are available. |
| @param jobid job id |
| @param fromEventId event id to start from. |
| @param maxEvents the max number of events we want to look at |
| @return array of task completion events. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getTaskDiagnostics" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="tipId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the diagnostics for a given task in a given job |
| @param jobId the id of the job |
| @return an array of the diagnostic messages]]> |
| </doc> |
| </method> |
| <field name="versionID" type="long" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Protocol that a JobClient and the central JobTracker use to communicate. The |
| JobClient can use these methods to submit a Job for execution, and learn about |
| the current system status.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.JobSubmissionProtocol --> |
| <!-- start class org.apache.hadoop.mapred.JobTracker --> |
| <class name="JobTracker" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.MRConstants"/> |
| <implements name="org.apache.hadoop.mapred.InterTrackerProtocol"/> |
| <implements name="org.apache.hadoop.mapred.JobSubmissionProtocol"/> |
| <method name="startTracker" return="org.apache.hadoop.mapred.JobTracker" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <exception name="InterruptedException" type="java.lang.InterruptedException"/> |
| <doc> |
| <![CDATA[Start the JobTracker with given configuration. |
| |
| The conf will be modified to reflect the actual ports on which |
| the JobTracker is up and running if the user passes the port as |
| <code>zero</code>. |
| |
| @param conf configuration for the JobTracker. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="stopTracker" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getProtocolVersion" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="protocol" type="java.lang.String"/> |
| <param name="clientVersion" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getAddress" return="java.net.InetSocketAddress" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| </method> |
| <method name="offerService" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="InterruptedException" type="java.lang.InterruptedException"/> |
| <doc> |
| <![CDATA[Run forever]]> |
| </doc> |
| </method> |
| <method name="getTotalSubmissions" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getJobTrackerMachine" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getTrackerIdentifier" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the unique identifier (ie. timestamp) of this job tracker start. |
| @return a string with a unique identifier]]> |
| </doc> |
| </method> |
| <method name="getTrackerPort" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getInfoPort" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getStartTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="runningJobs" return="java.util.Vector<org.apache.hadoop.mapred.JobInProgress>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getRunningJobs" return="java.util.List<org.apache.hadoop.mapred.JobInProgress>" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Version that is called from a timer thread, and therefore needs to be |
| careful to synchronize.]]> |
| </doc> |
| </method> |
| <method name="failedJobs" return="java.util.Vector<org.apache.hadoop.mapred.JobInProgress>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="completedJobs" return="java.util.Vector<org.apache.hadoop.mapred.JobInProgress>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="taskTrackers" return="java.util.Collection" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getTaskTracker" return="org.apache.hadoop.mapred.TaskTrackerStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="trackerID" type="java.lang.String"/> |
| </method> |
| <method name="resolveAndAddToTopology" return="org.apache.hadoop.net.Node" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <method name="getNodesAtMaxLevel" return="java.util.Collection<org.apache.hadoop.net.Node>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns a collection of nodes at the max level]]> |
| </doc> |
| </method> |
| <method name="getParentNode" return="org.apache.hadoop.net.Node" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="node" type="org.apache.hadoop.net.Node"/> |
| <param name="level" type="int"/> |
| </method> |
| <method name="getNode" return="org.apache.hadoop.net.Node" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Return the Node in the network topology that corresponds to the hostname]]> |
| </doc> |
| </method> |
| <method name="getNumTaskCacheLevels" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getNumResolvedTaskTrackers" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="heartbeat" return="org.apache.hadoop.mapred.HeartbeatResponse" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="status" type="org.apache.hadoop.mapred.TaskTrackerStatus"/> |
| <param name="initialContact" type="boolean"/> |
| <param name="acceptNewTasks" type="boolean"/> |
| <param name="responseId" type="short"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The periodic heartbeat mechanism between the {@link TaskTracker} and |
| the {@link JobTracker}. |
| |
| The {@link JobTracker} processes the status information sent by the |
| {@link TaskTracker} and responds with instructions to start/stop |
| tasks or jobs, and also 'reset' instructions during contingencies.]]> |
| </doc> |
| </method> |
| <method name="getFilesystemName" return="java.lang.String" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Grab the local fs name]]> |
| </doc> |
| </method> |
| <method name="reportTaskTrackerError" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskTracker" type="java.lang.String"/> |
| <param name="errorClass" type="java.lang.String"/> |
| <param name="errorMessage" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getNewJobId" return="java.lang.String" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Allocates a new JobId string.]]> |
| </doc> |
| </method> |
| <method name="submitJob" return="org.apache.hadoop.mapred.JobStatus" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[JobTracker.submitJob() kicks off a new job. |
| |
| Create a 'JobInProgress' object, which contains both JobProfile |
| and JobStatus. Those two sub-objects are sometimes shipped outside |
| of the JobTracker. But JobInProgress adds info that's useful for |
| the JobTracker alone. |
| |
| We add the JIP to the jobInitQueue, which is processed |
| asynchronously to handle split-computation and build up |
| the right TaskTracker/Block mapping.]]> |
| </doc> |
| </method> |
| <method name="getClusterStatus" return="org.apache.hadoop.mapred.ClusterStatus" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="killJob" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| </method> |
| <method name="getJobProfile" return="org.apache.hadoop.mapred.JobProfile" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| </method> |
| <method name="getJobStatus" return="org.apache.hadoop.mapred.JobStatus" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| </method> |
| <method name="getJobCounters" return="org.apache.hadoop.mapred.Counters" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| </method> |
| <method name="getMapTaskReports" return="org.apache.hadoop.mapred.TaskReport[]" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| </method> |
| <method name="getReduceTaskReports" return="org.apache.hadoop.mapred.TaskReport[]" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| </method> |
| <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| <param name="fromEventId" type="int"/> |
| <param name="maxEvents" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getTaskDiagnostics" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="tipId" type="java.lang.String"/> |
| <param name="taskId" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the diagnostics for a given task |
| @param jobId the id of the job |
| @param tipId the id of the tip |
| @param taskId the id of the task |
| @return an array of the diagnostic messages]]> |
| </doc> |
| </method> |
| <method name="killTask" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskid" type="java.lang.String"/> |
| <param name="shouldFail" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Mark a Task to be killed]]> |
| </doc> |
| </method> |
| <method name="getAssignedTracker" return="java.lang.String" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskId" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get tracker name for a given task id. |
| @param taskId the name of the task |
| @return The name of the task tracker]]> |
| </doc> |
| </method> |
| <method name="jobsToComplete" return="org.apache.hadoop.mapred.JobStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getAllJobs" return="org.apache.hadoop.mapred.JobStatus[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getJob" return="org.apache.hadoop.mapred.JobInProgress" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobid" type="java.lang.String"/> |
| </method> |
| <method name="getLocalJobFilePath" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get the localized job file path on the job trackers local file system |
| @param jobId id of the job |
| @return the path of the job conf file on the local file system]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <exception name="InterruptedException" type="java.lang.InterruptedException"/> |
| <doc> |
| <![CDATA[Start the JobTracker process. This is used only for debugging. As a rule, |
| JobTracker should be run as part of the DFS Namenode process.]]> |
| </doc> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[JobTracker is the central location for submitting and |
| tracking MR jobs in a network environment.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobTracker --> |
| <!-- start class org.apache.hadoop.mapred.JobTracker.IllegalStateException --> |
| <class name="JobTracker.IllegalStateException" extends="java.io.IOException" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JobTracker.IllegalStateException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[A client tried to submit a job before the Job Tracker was ready.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobTracker.IllegalStateException --> |
| <!-- start class org.apache.hadoop.mapred.JobTracker.State --> |
| <class name="JobTracker.State" extends="java.lang.Enum<org.apache.hadoop.mapred.JobTracker.State>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.mapred.JobTracker.State[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.mapred.JobTracker.State" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.JobTracker.State --> |
| <!-- start class org.apache.hadoop.mapred.KeyValueLineRecordReader --> |
| <class name="KeyValueLineRecordReader" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.RecordReader<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <constructor name="KeyValueLineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="getKeyClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createKey" return="org.apache.hadoop.io.Text" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createValue" return="org.apache.hadoop.io.Text" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="findSeparator" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="utf" type="byte[]"/> |
| <param name="start" type="int"/> |
| <param name="length" type="int"/> |
| <param name="sep" type="byte"/> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Text"/> |
| <param name="value" type="org.apache.hadoop.io.Text"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read key/value pair in a line.]]> |
| </doc> |
| </method> |
| <method name="getProgress" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[This class treats a line in the input as a key/value pair separated by a |
| separator character. The separator can be specified in config file |
| under the attribute name key.value.separator.in.input.line. The default |
| separator is the tab character ('\t').]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.KeyValueLineRecordReader --> |
| <!-- start class org.apache.hadoop.mapred.KeyValueTextInputFormat --> |
| <class name="KeyValueTextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.JobConfigurable"/> |
| <constructor name="KeyValueTextInputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| </method> |
| <method name="isSplitable" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="file" type="org.apache.hadoop.fs.Path"/> |
| </method> |
| <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines. |
| Either linefeed or carriage-return are used to signal end of line. Each line |
| is divided into key and value parts by a separator byte. If no such byte |
| exists, the key will be the entire line and value will be empty.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.KeyValueTextInputFormat --> |
| <!-- start class org.apache.hadoop.mapred.LineRecordReader --> |
| <class name="LineRecordReader" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.RecordReader<org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text>"/> |
| <constructor name="LineRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <constructor name="LineRecordReader" type="java.io.InputStream, long, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="LineRecordReader" type="java.io.InputStream, long, long, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="createKey" return="org.apache.hadoop.io.LongWritable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createValue" return="org.apache.hadoop.io.Text" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.LongWritable"/> |
| <param name="value" type="org.apache.hadoop.io.Text"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a line.]]> |
| </doc> |
| </method> |
| <method name="getProgress" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the progress within the split]]> |
| </doc> |
| </method> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[Treats keys as offset in file and value as line.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.LineRecordReader --> |
| <!-- start class org.apache.hadoop.mapred.LineRecordReader.LineReader --> |
| <class name="LineRecordReader.LineReader" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="LineRecordReader.LineReader" type="java.io.InputStream, org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a line reader that reads from the given stream using the |
| <code>io.file.buffer.size</code> specified in the given |
| <code>Configuration</code>. |
| @param in input stream |
| @param conf configuration |
| @throws IOException]]> |
| </doc> |
| </constructor> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Close the underlying stream. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="readLine" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="str" type="org.apache.hadoop.io.Text"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read from the InputStream into the given Text. |
| @param str the object to store the given line |
| @return the number of bytes read including the newline |
| @throws IOException if the underlying stream throws]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A class that provides a line reader from an input stream.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.LineRecordReader.LineReader --> |
| <!-- start class org.apache.hadoop.mapred.MapFileOutputFormat --> |
| <class name="MapFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat<org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MapFileOutputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter<org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="name" type="java.lang.String"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getReaders" return="org.apache.hadoop.io.MapFile.Reader[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="dir" type="org.apache.hadoop.fs.Path"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Open the output generated by this format.]]> |
| </doc> |
| </method> |
| <method name="getEntry" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="readers" type="org.apache.hadoop.io.MapFile.Reader[]"/> |
| <param name="partitioner" type="org.apache.hadoop.mapred.Partitioner<K, V>"/> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <param name="value" type="V extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get an entry from output generated by this class.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[An {@link OutputFormat} that writes {@link MapFile}s.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.MapFileOutputFormat --> |
| <!-- start interface org.apache.hadoop.mapred.Mapper --> |
| <interface name="Mapper" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.JobConfigurable"/> |
| <implements name="org.apache.hadoop.io.Closeable"/> |
| <method name="map" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K1"/> |
| <param name="value" type="V1"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<K2, V2>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Maps a single input key/value pair into an intermediate key/value pair. |
| |
| <p>Output pairs need not be of the same types as input pairs. A given |
| input pair may map to zero or many output pairs. Output pairs are |
| collected with calls to |
| {@link OutputCollector#collect(Object,Object)}.</p> |
| |
| <p>Applications can use the {@link Reporter} provided to report progress |
| or just indicate that they are alive. In scenarios where the application |
| takes an insignificant amount of time to process individual key/value |
| pairs, this is crucial since the framework might assume that the task has |
| timed-out and kill that task. The other way of avoiding this is to set |
| <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout"> |
| mapred.task.timeout</a> to a high-enough value (or even zero for no |
| time-outs).</p> |
| |
| @param key the input key. |
| @param value the input value. |
| @param output collects mapped keys and values. |
| @param reporter facility to report progress.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Maps input key/value pairs to a set of intermediate key/value pairs. |
| |
| <p>Maps are the individual tasks which transform input records into |
| intermediate records. The transformed intermediate records need not be of |
| the same type as the input records. A given input pair may map to zero or |
| many output pairs.</p> |
| |
| <p>The Hadoop Map-Reduce framework spawns one map task for each |
| {@link InputSplit} generated by the {@link InputFormat} for the job. |
| <code>Mapper</code> implementations can access the {@link JobConf} for the |
| job via the {@link JobConfigurable#configure(JobConf)} and initialize |
| themselves. Similarly they can use the {@link Closeable#close()} method for |
| de-initialization.</p> |
| |
| <p>The framework then calls |
| {@link #map(Object, Object, OutputCollector, Reporter)} |
| for each key/value pair in the <code>InputSplit</code> for that task.</p> |
| |
| <p>All intermediate values associated with a given output key are |
| subsequently grouped by the framework, and passed to a {@link Reducer} to |
| determine the final output. Users can control the grouping by specifying |
| a <code>Comparator</code> via |
| {@link JobConf#setOutputKeyComparatorClass(Class)}.</p> |
| |
| <p>The grouped <code>Mapper</code> outputs are partitioned per |
| <code>Reducer</code>. Users can control which keys (and hence records) go to |
| which <code>Reducer</code> by implementing a custom {@link Partitioner}. |
| |
| <p>Users can optionally specify a <code>combiner</code>, via |
| {@link JobConf#setCombinerClass(Class)}, to perform local aggregation of the |
| intermediate outputs, which helps to cut down the amount of data transferred |
| from the <code>Mapper</code> to the <code>Reducer</code>. |
| |
| <p>The intermediate, grouped outputs are always stored in |
| {@link SequenceFile}s. Applications can specify if and how the intermediate |
| outputs are to be compressed and which {@link CompressionCodec}s are to be |
| used via the <code>JobConf</code>.</p> |
| |
| <p>If the job has |
| <a href="{@docRoot}/org/apache/hadoop/mapred/JobConf.html#ReducerNone">zero |
| reduces</a> then the output of the <code>Mapper</code> is directly written |
| to the {@link FileSystem} without grouping by keys.</p> |
| |
| <p>Example:</p> |
| <p><blockquote><pre> |
| public class MyMapper<K extends WritableComparable, V extends Writable> |
| extends MapReduceBase implements Mapper<K, V, K, V> { |
| |
| static enum MyCounters { NUM_RECORDS } |
| |
| private String mapTaskId; |
| private String inputFile; |
| private int noRecords = 0; |
| |
| public void configure(JobConf job) { |
| mapTaskId = job.get("mapred.task.id"); |
| inputFile = job.get("mapred.input.file"); |
| } |
| |
| public void map(K key, V val, |
| OutputCollector<K, V> output, Reporter reporter) |
| throws IOException { |
| // Process the <key, value> pair (assume this takes a while) |
| // ... |
| // ... |
| |
| // Let the framework know that we are alive, and kicking! |
| // reporter.progress(); |
| |
| // Process some more |
| // ... |
| // ... |
| |
| // Increment the no. of <key, value> pairs processed |
| ++noRecords; |
| |
| // Increment counters |
| reporter.incrCounter(NUM_RECORDS, 1); |
| |
| // Every 100 records update application-level status |
| if ((noRecords%100) == 0) { |
| reporter.setStatus(mapTaskId + " processed " + noRecords + |
| " from input-file: " + inputFile); |
| } |
| |
| // Output the result |
| output.collect(key, val); |
| } |
| } |
| </pre></blockquote></p> |
| |
| <p>Applications may write a custom {@link MapRunnable} to exert greater |
| control on map processing e.g. multi-threaded <code>Mapper</code>s etc.</p> |
| |
| @see JobConf |
| @see InputFormat |
| @see Partitioner |
| @see Reducer |
| @see MapReduceBase |
| @see MapRunnable |
| @see SequenceFile]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.Mapper --> |
| <!-- start class org.apache.hadoop.mapred.MapReduceBase --> |
| <class name="MapReduceBase" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Closeable"/> |
| <implements name="org.apache.hadoop.mapred.JobConfigurable"/> |
| <constructor name="MapReduceBase" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Default implementation that does nothing.]]> |
| </doc> |
| </method> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Default implementation that does nothing.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Base class for {@link Mapper} and {@link Reducer} implementations. |
| |
| <p>Provides default no-op implementations for a few methods, most non-trivial |
| applications need to override some of them.</p>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.MapReduceBase --> |
| <!-- start interface org.apache.hadoop.mapred.MapRunnable --> |
| <interface name="MapRunnable" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.JobConfigurable"/> |
| <method name="run" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="input" type="org.apache.hadoop.mapred.RecordReader<K1, V1>"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<K2, V2>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Start mapping input <tt><key, value></tt> pairs. |
| |
| <p>Mapping of input records to output records is complete when this method |
| returns.</p> |
| |
| @param input the {@link RecordReader} to read the input records. |
| @param output the {@link OutputCollector} to collect the output records. |
| @param reporter {@link Reporter} to report progress, status-updates etc. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Expert: Generic interface for {@link Mapper}s. |
| |
| <p>Custom implementations of <code>MapRunnable</code> can exert greater |
| control on map processing e.g. multi-threaded, asynchronous mappers etc.</p> |
| |
| @see Mapper]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.MapRunnable --> |
| <!-- start class org.apache.hadoop.mapred.MapRunner --> |
| <class name="MapRunner" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.MapRunnable<K1, V1, K2, V2>"/> |
| <constructor name="MapRunner" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| </method> |
| <method name="run" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="input" type="org.apache.hadoop.mapred.RecordReader<K1, V1>"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<K2, V2>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[Default {@link MapRunnable} implementation.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.MapRunner --> |
| <!-- start class org.apache.hadoop.mapred.MapTaskStatus --> |
| <class name="MapTaskStatus" extends="org.apache.hadoop.mapred.TaskStatus" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MapTaskStatus" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="MapTaskStatus" type="java.lang.String, float, org.apache.hadoop.mapred.TaskStatus.State, java.lang.String, java.lang.String, java.lang.String, org.apache.hadoop.mapred.TaskStatus.Phase, org.apache.hadoop.mapred.Counters" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getIsMap" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getShuffleFinishTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getSortFinishTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.MapTaskStatus --> |
| <!-- start class org.apache.hadoop.mapred.MultiFileInputFormat --> |
| <class name="MultiFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat<K, V>" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MultiFileInputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="numSplits" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader<K, V>" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="split" type="org.apache.hadoop.mapred.InputSplit"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[An abstract {@link InputFormat} that returns {@link MultiFileSplit}'s |
| in {@link #getSplits(JobConf, int)} method. Splits are constructed from |
| the files under the input paths. Each split returned contains <i>nearly</i> |
| equal content length. <br> |
| Subclasses implement {@link #getRecordReader(InputSplit, JobConf, Reporter)} |
| to construct <code>RecordReader</code>'s for <code>MultiFileSplit</code>'s. |
| @see MultiFileSplit]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.MultiFileInputFormat --> |
| <!-- start class org.apache.hadoop.mapred.MultiFileSplit --> |
| <class name="MultiFileSplit" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.InputSplit"/> |
| <constructor name="MultiFileSplit" type="org.apache.hadoop.mapred.JobConf, org.apache.hadoop.fs.Path[], long[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getLengths" return="long[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns an array containing the lengths of the files in |
| the split]]> |
| </doc> |
| </method> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <doc> |
| <![CDATA[Returns the length of the i<sup>th</sup> Path]]> |
| </doc> |
| </method> |
| <method name="getNumPaths" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the number of Paths in the split]]> |
| </doc> |
| </method> |
| <method name="getPath" return="org.apache.hadoop.fs.Path" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <doc> |
| <![CDATA[Returns the i<sup>th</sup> Path]]> |
| </doc> |
| </method> |
| <method name="getPaths" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns all the Paths in the split]]> |
| </doc> |
| </method> |
| <method name="getLocations" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A sub-collection of input files. Unlike {@link FileSplit}, MultiFileSplit |
| class does not represent a split of a file, but a split of input files |
| into smaller sets. The atomic unit of split is a file. <br> |
| MultiFileSplit can be used to implement {@link RecordReader}'s, with |
| reading one record per file. |
| @see FileSplit |
| @see MultiFileInputFormat]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.MultiFileSplit --> |
| <!-- start interface org.apache.hadoop.mapred.OutputCollector --> |
| <interface name="OutputCollector" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="collect" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K"/> |
| <param name="value" type="V"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Adds a key/value pair to the output. |
| |
| @param key the key to collect. |
| @param value the value to collect. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Collects the <code><key, value></code> pairs output by {@link Mapper}s |
| and {@link Reducer}s. |
| |
| <p><code>OutputCollector</code> is the generalization of the facility |
| provided by the Map-Reduce framework to collect data output by either the |
| <code>Mapper</code> or the <code>Reducer</code> i.e. intermediate outputs |
| or the output of the job.</p>]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.OutputCollector --> |
| <!-- start interface org.apache.hadoop.mapred.OutputFormat --> |
| <interface name="OutputFormat" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter<K, V>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="name" type="java.lang.String"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the {@link RecordWriter} for the given job. |
| |
| @param ignored |
| @param job configuration for the job whose output is being written. |
| @param name the unique name for this part of the output. |
| @param progress mechanism for reporting progress while writing to file. |
| @return a {@link RecordWriter} to write the output for the job. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="checkOutputSpecs" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Check for validity of the output-specification for the job. |
| |
| <p>This is to validate the output specification for the job when the |
| job is submitted. Typically checks that it does not already exist, |
| throwing an exception when it already exists, so that output is not |
| overwritten.</p> |
| |
| @param ignored |
| @param job job configuration. |
| @throws IOException when output should not be attempted]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<code>OutputFormat</code> describes the output-specification for a |
| Map-Reduce job. |
| |
| <p>The Map-Reduce framework relies on the <code>OutputFormat</code> of the |
| job to:</p> |
| <ol> |
| <li> |
| Validate the output-specification of the job. For example, check that |
| the output directory doesn't already exist. |
| </li> |
| <li> |
| Provide the {@link RecordWriter} implementation to be used to write out |
| the output files of the job. Output files are stored in a |
| {@link FileSystem}. |
| </li> |
| </ol> |
| |
| @see RecordWriter |
| @see JobConf]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.OutputFormat --> |
| <!-- start class org.apache.hadoop.mapred.OutputFormatBase --> |
| <class name="OutputFormatBase" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="Use {@link FileOutputFormat}"> |
| <implements name="org.apache.hadoop.mapred.OutputFormat<K, V>"/> |
| <constructor name="OutputFormatBase" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setCompressOutput" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="compress" type="boolean"/> |
| <doc> |
| <![CDATA[Set whether the output of the job is compressed. |
| @param conf the {@link JobConf} to modify |
| @param compress should the output of the job be compressed?]]> |
| </doc> |
| </method> |
| <method name="getCompressOutput" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Is the job output compressed? |
| @param conf the {@link JobConf} to look in |
| @return <code>true</code> if the job output should be compressed, |
| <code>false</code> otherwise]]> |
| </doc> |
| </method> |
| <method name="setOutputCompressorClass" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="codecClass" type="java.lang.Class<? extends org.apache.hadoop.io.compress.CompressionCodec>"/> |
| <doc> |
| <![CDATA[Set the {@link CompressionCodec} to be used to compress job outputs. |
| @param conf the {@link JobConf} to modify |
| @param codecClass the {@link CompressionCodec} to be used to |
| compress the job outputs]]> |
| </doc> |
| </method> |
| <method name="getOutputCompressorClass" return="java.lang.Class<? extends org.apache.hadoop.io.compress.CompressionCodec>" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="defaultValue" type="java.lang.Class<? extends org.apache.hadoop.io.compress.CompressionCodec>"/> |
| <doc> |
| <![CDATA[Get the {@link CompressionCodec} for compressing the job outputs. |
| @param conf the {@link JobConf} to look in |
| @param defaultValue the {@link CompressionCodec} to return if not set |
| @return the {@link CompressionCodec} to be used to compress the |
| job outputs |
| @throws IllegalArgumentException if the class was specified, but not found]]> |
| </doc> |
| </method> |
| <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter<K, V>" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="name" type="java.lang.String"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="checkOutputSpecs" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="FileAlreadyExistsException" type="org.apache.hadoop.mapred.FileAlreadyExistsException"/> |
| <exception name="InvalidJobConfException" type="org.apache.hadoop.mapred.InvalidJobConfException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A base class for {@link OutputFormat}. |
| @deprecated Use {@link FileOutputFormat}]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.OutputFormatBase --> |
| <!-- start class org.apache.hadoop.mapred.OutputLogFilter --> |
| <class name="OutputLogFilter" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.fs.PathFilter"/> |
| <constructor name="OutputLogFilter" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="accept" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="org.apache.hadoop.fs.Path"/> |
| </method> |
| <doc> |
| <![CDATA[This class filters log files from the given directory. |
| It doesn't accept paths containing _logs. |
| This can be used to list paths of output directory as follows: |
| Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir, |
| new OutputLogFilter()));]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.OutputLogFilter --> |
| <!-- start interface org.apache.hadoop.mapred.Partitioner --> |
| <interface name="Partitioner" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.JobConfigurable"/> |
| <method name="getPartition" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K2"/> |
| <param name="value" type="V2"/> |
| <param name="numPartitions" type="int"/> |
| <doc> |
| <![CDATA[Get the partition number for a given key (hence record) given the total |
| number of partitions i.e. number of reduce-tasks for the job. |
|  |
| <p>Typically a hash function on all or a subset of the key.</p> |
|  |
| @param key the key to be partitioned. |
| @param value the entry value. |
| @param numPartitions the total number of partitions. |
| @return the partition number for the <code>key</code>.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Partitions the key space. |
| |
| <p><code>Partitioner</code> controls the partitioning of the keys of the |
| intermediate map-outputs. The key (or a subset of the key) is used to derive |
| the partition, typically by a hash function. The total number of partitions |
| is the same as the number of reduce tasks for the job. Hence this controls |
| which of the <code>m</code> reduce tasks the intermediate key (and hence the |
| record) is sent for reduction.</p> |
| |
| @see Reducer]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.Partitioner --> |
| <!-- start interface org.apache.hadoop.mapred.RecordReader --> |
| <interface name="RecordReader" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K"/> |
| <param name="value" type="V"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reads the next key/value pair from the input for processing. |
| |
| @param key the key to read data into |
| @param value the value to read data into |
| @return true iff a key/value was read, false if at EOF]]> |
| </doc> |
| </method> |
| <method name="createKey" return="K" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create an object of the appropriate type to be used as a key. |
| |
| @return a new key object.]]> |
| </doc> |
| </method> |
| <method name="createValue" return="V" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create an object of the appropriate type to be used as a value. |
| |
| @return a new value object.]]> |
| </doc> |
| </method> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the current position in the input. |
| |
| @return the current position in the input. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Close this {@link InputSplit} to future operations. |
| |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getProgress" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[How much of the input has the {@link RecordReader} consumed, i.e. |
| how much has been processed? |
| |
| @return progress from <code>0.0</code> to <code>1.0</code>. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<code>RecordReader</code> reads <key, value> pairs from an |
| {@link InputSplit}. |
| |
| <p><code>RecordReader</code>, typically, converts the byte-oriented view of |
| the input, provided by the <code>InputSplit</code>, and presents a |
| record-oriented view for the {@link Mapper} & {@link Reducer} tasks for |
| processing. It thus assumes the responsibility of processing record |
| boundaries and presenting the tasks with keys and values.</p> |
| |
| @see InputSplit |
| @see InputFormat]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.RecordReader --> |
| <!-- start interface org.apache.hadoop.mapred.RecordWriter --> |
| <interface name="RecordWriter" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K"/> |
| <param name="value" type="V"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Writes a key/value pair. |
| |
| @param key the key to write. |
| @param value the value to write. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Close this <code>RecordWriter</code> to future operations. |
| |
| @param reporter facility to report progress. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<code>RecordWriter</code> writes the output <key, value> pairs |
| to an output file. |
| |
| <p><code>RecordWriter</code> implementations write the job outputs to the |
| {@link FileSystem}. |
| |
| @see OutputFormat]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.RecordWriter --> |
| <!-- start interface org.apache.hadoop.mapred.Reducer --> |
| <interface name="Reducer" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.JobConfigurable"/> |
| <implements name="org.apache.hadoop.io.Closeable"/> |
| <method name="reduce" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K2"/> |
| <param name="values" type="java.util.Iterator<V2>"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<K3, V3>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[<i>Reduces</i> values for a given key. |
| |
| <p>The framework calls this method for each |
| <code><key, (list of values)></code> pair in the grouped inputs. |
| Output values must be of the same type as input values. Input keys must |
| not be altered. Typically all values are combined into zero or one value. |
| </p> |
| |
| <p>Output pairs are collected with calls to |
| {@link OutputCollector#collect(Object,Object)}.</p> |
| |
| <p>Applications can use the {@link Reporter} provided to report progress |
| or just indicate that they are alive. In scenarios where the application |
| takes an insignificant amount of time to process individual key/value |
| pairs, this is crucial since the framework might assume that the task has |
| timed-out and kill that task. The other way of avoiding this is to set |
| <a href="{@docRoot}/../hadoop-default.html#mapred.task.timeout"> |
| mapred.task.timeout</a> to a high-enough value (or even zero for no |
| time-outs).</p> |
| |
| @param key the key. |
| @param values the list of values to reduce. |
| @param output to collect keys and combined values. |
| @param reporter facility to report progress.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Reduces a set of intermediate values which share a key to a smaller set of |
| values. |
| |
| <p>The number of <code>Reducer</code>s for the job is set by the user via |
| {@link JobConf#setNumReduceTasks(int)}. <code>Reducer</code> implementations |
| can access the {@link JobConf} for the job via the |
| {@link JobConfigurable#configure(JobConf)} method and initialize themselves. |
| Similarly they can use the {@link Closeable#close()} method for |
| de-initialization.</p> |
| |
| <p><code>Reducer</code> has 3 primary phases:</p> |
| <ol> |
| <li> |
| |
| <h4 id="Shuffle">Shuffle</h4> |
| |
| <p><code>Reducer</code> is input the grouped output of a {@link Mapper}. |
| In this phase the framework, for each <code>Reducer</code>, fetches the |
| relevant partition of the output of all the <code>Mapper</code>s, via HTTP. |
| </p> |
| </li> |
| |
| <li> |
| <h4 id="Sort">Sort</h4> |
| |
| <p>The framework groups <code>Reducer</code> inputs by <code>key</code>s |
| (since different <code>Mapper</code>s may have output the same key) in this |
| stage.</p> |
| |
| <p>The shuffle and sort phases occur simultaneously i.e. while outputs are |
| being fetched they are merged.</p> |
| |
| <h5 id="SecondarySort">SecondarySort</h5> |
| |
| <p>If equivalence rules for keys while grouping the intermediates are |
| different from those for grouping keys before reduction, then one may |
| specify a <code>Comparator</code> via |
| {@link JobConf#setOutputValueGroupingComparator(Class)}. Since |
| {@link JobConf#setOutputKeyComparatorClass(Class)} can be used to |
| control how intermediate keys are grouped, these can be used in conjunction |
| to simulate <i>secondary sort on values</i>.</p> |
| |
| |
| For example, say that you want to find duplicate web pages and tag them |
| all with the url of the "best" known example. You would set up the job |
| like: |
| <ul> |
| <li>Map Input Key: url</li> |
| <li>Map Input Value: document</li> |
| <li>Map Output Key: document checksum, url pagerank</li> |
| <li>Map Output Value: url</li> |
| <li>Partitioner: by checksum</li> |
| <li>OutputKeyComparator: by checksum and then decreasing pagerank</li> |
| <li>OutputValueGroupingComparator: by checksum</li> |
| </ul> |
| </li> |
| |
| <li> |
| <h4 id="Reduce">Reduce</h4> |
| |
| <p>In this phase the |
| {@link #reduce(Object, Iterator, OutputCollector, Reporter)} |
| method is called for each <code><key, (list of values)></code> pair in |
| the grouped inputs.</p> |
| <p>The output of the reduce task is typically written to the |
| {@link FileSystem} via |
| {@link OutputCollector#collect(Object, Object)}.</p> |
| </li> |
| </ol> |
| |
| <p>The output of the <code>Reducer</code> is <b>not re-sorted</b>.</p> |
| |
| <p>Example:</p> |
| <p><blockquote><pre> |
| public class MyReducer<K extends WritableComparable, V extends Writable> |
| extends MapReduceBase implements Reducer<K, V, K, V> { |
| |
| static enum MyCounters { NUM_RECORDS } |
| |
| private String reduceTaskId; |
| private int noKeys = 0; |
| |
| public void configure(JobConf job) { |
| reduceTaskId = job.get("mapred.task.id"); |
| } |
| |
| public void reduce(K key, Iterator<V> values, |
| OutputCollector<K, V> output, |
| Reporter reporter) |
| throws IOException { |
| |
| // Process |
| int noValues = 0; |
| while (values.hasNext()) { |
| V value = values.next(); |
| |
| // Increment the no. of values for this key |
| ++noValues; |
| |
| // Process the <key, value> pair (assume this takes a while) |
| // ... |
| // ... |
| |
| // Let the framework know that we are alive, and kicking! |
| if ((noValues%10) == 0) { |
| reporter.progress(); |
| } |
| |
| // Process some more |
| // ... |
| // ... |
| |
| // Output the <key, value> |
| output.collect(key, value); |
| } |
| |
| // Increment the no. of <key, list of values> pairs processed |
| ++noKeys; |
| |
| // Increment counters |
| reporter.incrCounter(NUM_RECORDS, 1); |
| |
| // Every 100 keys update application-level status |
| if ((noKeys%100) == 0) { |
| reporter.setStatus(reduceTaskId + " processed " + noKeys); |
| } |
| } |
| } |
| </pre></blockquote></p> |
| |
| @see Mapper |
| @see Partitioner |
| @see Reporter |
| @see MapReduceBase]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.Reducer --> |
| <!-- start class org.apache.hadoop.mapred.ReduceTaskStatus --> |
| <class name="ReduceTaskStatus" extends="org.apache.hadoop.mapred.TaskStatus" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ReduceTaskStatus" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="ReduceTaskStatus" type="java.lang.String, float, org.apache.hadoop.mapred.TaskStatus.State, java.lang.String, java.lang.String, java.lang.String, org.apache.hadoop.mapred.TaskStatus.Phase, org.apache.hadoop.mapred.Counters" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="clone" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getIsMap" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getShuffleFinishTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getSortFinishTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getFetchFailedMaps" return="java.util.List<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.ReduceTaskStatus --> |
| <!-- start interface org.apache.hadoop.mapred.Reporter --> |
| <interface name="Reporter" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.util.Progressable"/> |
| <method name="setStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="status" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the status description for the task. |
| |
| @param status brief description of the current status.]]> |
| </doc> |
| </method> |
| <method name="incrCounter" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Enum"/> |
| <param name="amount" type="long"/> |
| <doc> |
| <![CDATA[Increments the counter identified by the key, which can be of |
| any {@link Enum} type, by the specified amount. |
| |
| @param key key to identify the counter to be incremented. The key can be |
| be any <code>Enum</code>. |
| @param amount A non-negative amount by which the counter is to |
| be incremented.]]> |
| </doc> |
| </method> |
| <method name="getInputSplit" return="org.apache.hadoop.mapred.InputSplit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="UnsupportedOperationException" type="java.lang.UnsupportedOperationException"/> |
| <doc> |
| <![CDATA[Get the {@link InputSplit} object for a map. |
| |
| @return the <code>InputSplit</code> that the map is reading from. |
| @throws UnsupportedOperationException if called outside a mapper]]> |
| </doc> |
| </method> |
| <field name="NULL" type="org.apache.hadoop.mapred.Reporter" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[A constant of Reporter type that does nothing.]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[A facility for Map-Reduce applications to report progress and update |
| counters, status information etc. |
| |
| <p>{@link Mapper} and {@link Reducer} can use the <code>Reporter</code> |
| provided to report progress or just indicate that they are alive. In |
| scenarios where the application takes an insignificant amount of time to |
| process individual key/value pairs, this is crucial since the framework |
| might assume that the task has timed-out and kill that task. |
| |
| <p>Applications can also update {@link Counters} via the provided |
| <code>Reporter</code>.</p> |
| |
| @see Progressable |
| @see Counters]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.Reporter --> |
| <!-- start interface org.apache.hadoop.mapred.RunningJob --> |
| <interface name="RunningJob" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getJobID" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the job identifier. |
| |
| @return the job identifier.]]> |
| </doc> |
| </method> |
| <method name="getJobName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the name of the job. |
| |
| @return the name of the job.]]> |
| </doc> |
| </method> |
| <method name="getJobFile" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the path of the submitted job configuration. |
| |
| @return the path of the submitted job configuration.]]> |
| </doc> |
| </method> |
| <method name="getTrackingURL" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the URL where some job progress information will be displayed. |
| |
| @return the URL where some job progress information will be displayed.]]> |
| </doc> |
| </method> |
| <method name="mapProgress" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the <i>progress</i> of the job's map-tasks, as a float between 0.0 |
| and 1.0. When all map tasks have completed, the function returns 1.0. |
| |
| @return the progress of the job's map-tasks. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="reduceProgress" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the <i>progress</i> of the job's reduce-tasks, as a float between 0.0 |
| and 1.0. When all reduce tasks have completed, the function returns 1.0. |
| |
| @return the progress of the job's reduce-tasks. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="isComplete" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Check if the job is finished or not. |
| This is a non-blocking call. |
| |
| @return <code>true</code> if the job is complete, else <code>false</code>. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="isSuccessful" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Check if the job completed successfully. |
| |
| @return <code>true</code> if the job succeeded, else <code>false</code>. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="waitForCompletion" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Blocks until the job is complete. |
| |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="killJob" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Kill the running job. Blocks until all job tasks have been |
| killed as well. If the job is no longer running, it simply returns. |
| |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getTaskCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="startFrom" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get events indicating completion (success/failure) of component tasks. |
| |
| @param startFrom index to start fetching events from |
| @return an array of {@link TaskCompletionEvent}s |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="killTask" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="shouldFail" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Kill indicated task attempt. |
| |
| @param taskId the id of the task to be terminated. |
| @param shouldFail if true the task is failed and added to failed tasks |
| list, otherwise it is just killed, w/o affecting |
| job failure status. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getCounters" return="org.apache.hadoop.mapred.Counters" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Gets the counters for this job. |
| |
| @return the counters for this job. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<code>RunningJob</code> is the user-interface to query for details on a |
| running Map-Reduce job. |
| |
| <p>Clients can get hold of <code>RunningJob</code> via the {@link JobClient} |
| and then query the running-job for details such as name, configuration, |
| progress etc.</p> |
| |
| @see JobClient]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.RunningJob --> |
| <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat --> |
| <class name="SequenceFileAsBinaryInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat<org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SequenceFileAsBinaryInputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader<org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="split" type="org.apache.hadoop.mapred.InputSplit"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[InputFormat reading keys, values from SequenceFiles in binary (raw) |
| format.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat --> |
| <!-- start class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader --> |
| <class name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.RecordReader<org.apache.hadoop.io.BytesWritable, org.apache.hadoop.io.BytesWritable>"/> |
| <constructor name="SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="createKey" return="org.apache.hadoop.io.BytesWritable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createValue" return="org.apache.hadoop.io.BytesWritable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getKeyClassName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Retrieve the name of the key class for this SequenceFile. |
| @see org.apache.hadoop.io.SequenceFile.Reader#getKeyClassName]]> |
| </doc> |
| </method> |
| <method name="getValueClassName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Retrieve the name of the value class for this SequenceFile. |
| @see org.apache.hadoop.io.SequenceFile.Reader#getValueClassName]]> |
| </doc> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.BytesWritable"/> |
| <param name="val" type="org.apache.hadoop.io.BytesWritable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read raw bytes from a SequenceFile.]]> |
| </doc> |
| </method> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getProgress" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the progress within the input split |
| @return 0.0 to 1.0 of the input byte range]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Read records from a SequenceFile as binary (raw) bytes.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.SequenceFileAsBinaryInputFormat.SequenceFileAsBinaryRecordReader --> |
| <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat --> |
| <class name="SequenceFileAsTextInputFormat" extends="org.apache.hadoop.mapred.SequenceFileInputFormat<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SequenceFileAsTextInputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="split" type="org.apache.hadoop.mapred.InputSplit"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[This class is similar to SequenceFileInputFormat, except it generates SequenceFileAsTextRecordReader |
| which converts the input keys and values to their String forms by calling toString() method.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextInputFormat --> |
| <!-- start class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader --> |
| <class name="SequenceFileAsTextRecordReader" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.RecordReader<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <constructor name="SequenceFileAsTextRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="createKey" return="org.apache.hadoop.io.Text" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createValue" return="org.apache.hadoop.io.Text" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Text"/> |
| <param name="value" type="org.apache.hadoop.io.Text"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read key/value pair in a line.]]> |
| </doc> |
| </method> |
| <method name="getProgress" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[This class converts the input keys and values to their String forms by calling toString() |
method. This class is to SequenceFileAsTextInputFormat as LineRecordReader
 is to TextInputFormat.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.SequenceFileAsTextRecordReader --> |
| <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter --> |
| <class name="SequenceFileInputFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFormat<K, V>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SequenceFileInputFilter" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader<K, V>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="split" type="org.apache.hadoop.mapred.InputSplit"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a record reader for the given split |
| @param split file split |
| @param job job configuration |
| @param reporter reporter who sends report to task tracker |
| @return RecordReader]]> |
| </doc> |
| </method> |
| <method name="setFilterClass" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="filterClass" type="java.lang.Class"/> |
| <doc> |
| <![CDATA[set the filter class |
| |
| @param conf application configuration |
| @param filterClass filter class]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A class that allows a map/red job to work on a sample of sequence files. |
| The sample is decided by the filter class set by the job.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter --> |
| <!-- start interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter --> |
| <interface name="SequenceFileInputFilter.Filter" abstract="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.conf.Configurable"/> |
| <method name="accept" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Writable"/> |
| <doc> |
| <![CDATA[filter function |
| Decide if a record should be filtered or not |
| @param key record key |
| @return true if a record is accepted; return false otherwise]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[filter interface]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.SequenceFileInputFilter.Filter --> |
| <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase --> |
| <class name="SequenceFileInputFilter.FilterBase" extends="java.lang.Object" |
| abstract="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.SequenceFileInputFilter.Filter"/> |
| <constructor name="SequenceFileInputFilter.FilterBase" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
<![CDATA[Base class for Filters.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase --> |
| <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter --> |
| <class name="SequenceFileInputFilter.MD5Filter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SequenceFileInputFilter.MD5Filter" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setFrequency" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="frequency" type="int"/> |
| <doc> |
| <![CDATA[set the filtering frequency in configuration |
| |
| @param conf configuration |
| @param frequency filtering frequency]]> |
| </doc> |
| </method> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[configure the filter according to configuration |
| |
| @param conf configuration]]> |
| </doc> |
| </method> |
| <method name="accept" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Writable"/> |
| <doc> |
| <![CDATA[Filtering method |
| If MD5(key) % frequency==0, return true; otherwise return false |
| @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(org.apache.hadoop.io.Writable)]]> |
| </doc> |
| </method> |
| <field name="MD5_LEN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
<![CDATA[This class returns a set of records by examining the MD5 digest of its
| key against a filtering frequency <i>f</i>. The filtering criteria is |
| MD5(key) % f == 0.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.MD5Filter --> |
| <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter --> |
| <class name="SequenceFileInputFilter.PercentFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SequenceFileInputFilter.PercentFilter" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setFrequency" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="frequency" type="int"/> |
| <doc> |
| <![CDATA[set the frequency and stores it in conf |
| @param conf configuration |
@param frequency filtering frequency]]>
| </doc> |
| </method> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[configure the filter by checking the configuration |
| |
| @param conf configuration]]> |
| </doc> |
| </method> |
| <method name="accept" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Writable"/> |
| <doc> |
| <![CDATA[Filtering method |
| If record# % frequency==0, return true; otherwise return false |
| @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(org.apache.hadoop.io.Writable)]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class returns a percentage of records |
| The percentage is determined by a filtering frequency <i>f</i> using |
| the criteria record# % f == 0. |
| For example, if the frequency is 10, one out of 10 records is returned.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.PercentFilter --> |
| <!-- start class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter --> |
| <class name="SequenceFileInputFilter.RegexFilter" extends="org.apache.hadoop.mapred.SequenceFileInputFilter.FilterBase" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SequenceFileInputFilter.RegexFilter" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setPattern" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="regex" type="java.lang.String"/> |
| <exception name="PatternSyntaxException" type="java.util.regex.PatternSyntaxException"/> |
| <doc> |
| <![CDATA[Define the filtering regex and stores it in conf |
| @param conf where the regex is set |
| @param regex regex used as a filter]]> |
| </doc> |
| </method> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[configure the Filter by checking the configuration]]> |
| </doc> |
| </method> |
| <method name="accept" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Writable"/> |
| <doc> |
| <![CDATA[Filtering method |
| If key matches the regex, return true; otherwise return false |
| @see org.apache.hadoop.mapred.SequenceFileInputFilter.Filter#accept(org.apache.hadoop.io.Writable)]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Records filter by matching key to regex]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.SequenceFileInputFilter.RegexFilter --> |
| <!-- start class org.apache.hadoop.mapred.SequenceFileInputFormat --> |
| <class name="SequenceFileInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat<K, V>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SequenceFileInputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="listPaths" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader<K, V>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="split" type="org.apache.hadoop.mapred.InputSplit"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[An {@link InputFormat} for {@link SequenceFile}s.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.SequenceFileInputFormat --> |
| <!-- start class org.apache.hadoop.mapred.SequenceFileOutputFormat --> |
| <class name="SequenceFileOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat<org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SequenceFileOutputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter<org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="name" type="java.lang.String"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getReaders" return="org.apache.hadoop.io.SequenceFile.Reader[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="dir" type="org.apache.hadoop.fs.Path"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Open the output generated by this format.]]> |
| </doc> |
| </method> |
| <method name="getOutputCompressionType" return="org.apache.hadoop.io.SequenceFile.CompressionType" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Get the {@link CompressionType} for the output {@link SequenceFile}. |
| @param conf the {@link JobConf} |
| @return the {@link CompressionType} for the output {@link SequenceFile}, |
| defaulting to {@link CompressionType#RECORD}]]> |
| </doc> |
| </method> |
| <method name="setOutputCompressionType" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="style" type="org.apache.hadoop.io.SequenceFile.CompressionType"/> |
| <doc> |
| <![CDATA[Set the {@link CompressionType} for the output {@link SequenceFile}. |
| @param conf the {@link JobConf} to modify |
| @param style the {@link CompressionType} for the output |
| {@link SequenceFile}]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[An {@link OutputFormat} that writes {@link SequenceFile}s.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.SequenceFileOutputFormat --> |
| <!-- start class org.apache.hadoop.mapred.SequenceFileRecordReader --> |
| <class name="SequenceFileRecordReader" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.RecordReader<K, V>"/> |
| <constructor name="SequenceFileRecordReader" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.mapred.FileSplit" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="getKeyClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The class of key that must be passed to {@link |
#next(WritableComparable,Writable)}.]]>
| </doc> |
| </method> |
| <method name="getValueClass" return="java.lang.Class" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The class of value that must be passed to {@link |
#next(WritableComparable,Writable)}.]]>
| </doc> |
| </method> |
| <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="createValue" return="V extends org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <param name="value" type="V extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getCurrentValue" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="value" type="V extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getProgress" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the progress within the input split |
| @return 0.0 to 1.0 of the input byte range]]> |
| </doc> |
| </method> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="seek" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="pos" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="conf" type="org.apache.hadoop.conf.Configuration" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[An {@link RecordReader} for {@link SequenceFile}s.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.SequenceFileRecordReader --> |
| <!-- start class org.apache.hadoop.mapred.StatusHttpServer --> |
| <class name="StatusHttpServer" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="StatusHttpServer" type="java.lang.String, java.lang.String, int, boolean" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a status server on the given port. |
| The jsp scripts are taken from src/webapps/<name>. |
| @param name The name of the server |
| @param port The port to use on the server |
| @param findPort whether the server should start at the given port and |
| increment by 1 until it finds a free port.]]> |
| </doc> |
| </constructor> |
| <method name="setAttribute" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="value" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Set a value in the webapp context. These values are available to the jsp |
| pages as "application.getAttribute(name)". |
| @param name The name of the attribute |
| @param value The value of the attribute]]> |
| </doc> |
| </method> |
| <method name="addServlet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="pathSpec" type="java.lang.String"/> |
| <param name="servletClass" type="java.lang.Class<T>"/> |
| <doc> |
| <![CDATA[Add a servlet in the server. |
| @param name The name of the servlet (can be passed as null) |
| @param pathSpec The path spec for the servlet |
| @param servletClass The servlet class]]> |
| </doc> |
| </method> |
| <method name="getAttribute" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get the value in the webapp context. |
| @param name The name of the attribute |
| @return The value of the attribute]]> |
| </doc> |
| </method> |
| <method name="getPort" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the port that the server is on |
| @return the port]]> |
| </doc> |
| </method> |
| <method name="setThreads" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="min" type="int"/> |
| <param name="max" type="int"/> |
| </method> |
| <method name="addSslListener" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="addr" type="java.net.InetSocketAddress"/> |
| <param name="keystore" type="java.lang.String"/> |
| <param name="storPass" type="java.lang.String"/> |
| <param name="keyPass" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Configure an ssl listener on the server. |
| @param addr address to listen on |
| @param keystore location of the keystore |
| @param storPass password for the keystore |
| @param keyPass password for the key]]> |
| </doc> |
| </method> |
| <method name="start" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Start the server. Does not wait for the server to start.]]> |
| </doc> |
| </method> |
| <method name="stop" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="InterruptedException" type="java.lang.InterruptedException"/> |
| <doc> |
| <![CDATA[stop the server]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Create a Jetty embedded server to answer http requests. The primary goal |
| is to serve up status information for the server. |
| There are three contexts: |
| "/logs/" -> points to the log directory |
| "/static/" -> points to common static files (src/webapps/static) |
| "/" -> the jsp server code from (src/webapps/<name>)]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.StatusHttpServer --> |
| <!-- start class org.apache.hadoop.mapred.StatusHttpServer.StackServlet --> |
| <class name="StatusHttpServer.StackServlet" extends="javax.servlet.http.HttpServlet" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="StatusHttpServer.StackServlet" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doGet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="response" type="javax.servlet.http.HttpServletResponse"/> |
| <exception name="ServletException" type="javax.servlet.ServletException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A very simple servlet to serve up a text representation of the current |
| stack traces. It both returns the stacks to the caller and logs them. |
| Currently the stack traces are gathered sequentially, so they do not |
| represent a single consistent point-in-time snapshot.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.StatusHttpServer.StackServlet --> |
| <!-- start class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet --> |
| <class name="StatusHttpServer.TaskGraphServlet" extends="javax.servlet.http.HttpServlet" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="StatusHttpServer.TaskGraphServlet" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doGet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="response" type="javax.servlet.http.HttpServletResponse"/> |
| <exception name="ServletException" type="javax.servlet.ServletException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="width" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[width of the graph w/o margins]]> |
| </doc> |
| </field> |
| <field name="height" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[height of the graph w/o margins]]> |
| </doc> |
| </field> |
| <field name="ymargin" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[margin space on y axis]]> |
| </doc> |
| </field> |
| <field name="xmargin" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[margin space on x axis]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[The servlet that outputs svg graphics for map / reduce task |
| statuses]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.StatusHttpServer.TaskGraphServlet --> |
| <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent --> |
| <class name="TaskCompletionEvent" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="TaskCompletionEvent" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Default constructor for Writable.]]> |
| </doc> |
| </constructor> |
| <constructor name="TaskCompletionEvent" type="int, java.lang.String, int, boolean, org.apache.hadoop.mapred.TaskCompletionEvent.Status, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor. eventId should be created externally and incremented |
| per event for each job. |
| @param eventId event id, event id should be unique and assigned |
| incrementally, starting from 0. |
| @param taskId task id |
| @param status task's status |
| @param taskTrackerHttp task tracker's host:port for http.]]> |
| </doc> |
| </constructor> |
| <method name="getEventId" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns event Id. |
| @return event id]]> |
| </doc> |
| </method> |
| <method name="getTaskId" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns task id. |
| @return task id]]> |
| </doc> |
| </method> |
| <method name="getTaskStatus" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns enum Status.SUCCESS or Status.FAILURE. |
| @return task tracker status]]> |
| </doc> |
| </method> |
| <method name="getTaskTrackerHttp" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[http location of the tasktracker where this task ran. |
| @return http location of tasktracker user logs]]> |
| </doc> |
| </method> |
| <method name="getTaskRunTime" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns time (in millisec) the task took to complete.]]> |
| </doc> |
| </method> |
| <method name="setTaskRunTime" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskCompletionTime" type="int"/> |
| <doc> |
| <![CDATA[Set the task completion time |
| @param taskCompletionTime time (in millisec) the task took to complete]]> |
| </doc> |
| </method> |
| <method name="setEventId" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="eventId" type="int"/> |
| <doc> |
| <![CDATA[Set the event Id. Event ids should be assigned incrementally, starting from 0. |
| @param eventId]]> |
| </doc> |
| </method> |
| <method name="setTaskId" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskId" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Sets task id. |
| @param taskId]]> |
| </doc> |
| </method> |
| <method name="setTaskStatus" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="status" type="org.apache.hadoop.mapred.TaskCompletionEvent.Status"/> |
| <doc> |
| <![CDATA[Set task status. |
| @param status]]> |
| </doc> |
| </method> |
| <method name="setTaskTrackerHttp" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskTrackerHttp" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set task tracker http location. |
| @param taskTrackerHttp]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="isMapTask" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="idWithinJob" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="EMPTY_ARRAY" type="org.apache.hadoop.mapred.TaskCompletionEvent[]" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[This is used to track task completion events on |
| job tracker.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent --> |
| <!-- start class org.apache.hadoop.mapred.TaskCompletionEvent.Status --> |
| <class name="TaskCompletionEvent.Status" extends="java.lang.Enum<org.apache.hadoop.mapred.TaskCompletionEvent.Status>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.mapred.TaskCompletionEvent.Status" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TaskCompletionEvent.Status --> |
| <!-- start class org.apache.hadoop.mapred.TaskLog --> |
| <class name="TaskLog" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="TaskLog" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getTaskLogFile" return="java.io.File" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskid" type="java.lang.String"/> |
| <param name="filter" type="org.apache.hadoop.mapred.TaskLog.LogName"/> |
| </method> |
| <method name="cleanup" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="logsRetainHours" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Purge old user logs. |
| |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getTaskLogLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Get the desired maximum length of task's logs. |
| @param conf the job to look in |
| @return the number of bytes to cap the log files at]]> |
| </doc> |
| </method> |
| <method name="captureOutAndError" return="java.util.List<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="cmd" type="java.util.List<java.lang.String>"/> |
| <param name="stdoutFilename" type="java.io.File"/> |
| <param name="stderrFilename" type="java.io.File"/> |
| <param name="tailLength" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Wrap a command in a shell to capture stdout and stderr to files. |
| If the tailLength is 0, the entire output will be saved. |
| @param cmd The command and the arguments that should be run |
| @param stdoutFilename The filename that stdout should be saved to |
| @param stderrFilename The filename that stderr should be saved to |
| @param tailLength The length of the tail to be saved. |
| @return the modified command that should be run]]> |
| </doc> |
| </method> |
| <method name="captureOutAndError" return="java.util.List<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="setup" type="java.util.List<java.lang.String>"/> |
| <param name="cmd" type="java.util.List<java.lang.String>"/> |
| <param name="stdoutFilename" type="java.io.File"/> |
| <param name="stderrFilename" type="java.io.File"/> |
| <param name="tailLength" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Wrap a command in a shell to capture stdout and stderr to files. |
| Setup commands such as setting memory limit can be passed which |
| will be executed before exec. |
| If the tailLength is 0, the entire output will be saved. |
| @param setup The setup commands for the execed process. |
| @param cmd The command and the arguments that should be run |
| @param stdoutFilename The filename that stdout should be saved to |
| @param stderrFilename The filename that stderr should be saved to |
| @param tailLength The length of the tail to be saved. |
| @return the modified command that should be run]]> |
| </doc> |
| </method> |
| <method name="addCommand" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="cmd" type="java.util.List<java.lang.String>"/> |
| <param name="isExecutable" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Add quotes to each of the command strings and |
| return as a single string |
| @param cmd The command to be quoted |
| @param isExecutable makes shell path if the first |
| argument is executable |
| @return returns The quoted string. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="captureDebugOut" return="java.util.List<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="cmd" type="java.util.List<java.lang.String>"/> |
| <param name="debugoutFilename" type="java.io.File"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Wrap a command in a shell to capture debug script's |
| stdout and stderr to debugout. |
| @param cmd The command and the arguments that should be run |
| @param debugoutFilename The filename that stdout and stderr |
| should be saved to. |
| @return the modified command that should be run |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A simple logger to handle the task-specific user logs. |
| This class uses the system property <code>hadoop.log.dir</code>.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TaskLog --> |
| <!-- start class org.apache.hadoop.mapred.TaskLog.LogName --> |
| <class name="TaskLog.LogName" extends="java.lang.Enum<org.apache.hadoop.mapred.TaskLog.LogName>" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.mapred.TaskLog.LogName[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.mapred.TaskLog.LogName" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[The filter for userlogs.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TaskLog.LogName --> |
| <!-- start class org.apache.hadoop.mapred.TaskLog.Reader --> |
| <class name="TaskLog.Reader" extends="java.io.InputStream" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="TaskLog.Reader" type="java.lang.String, org.apache.hadoop.mapred.TaskLog.LogName, long, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a log file from start to end positions. The offsets may be negative, |
| in which case they are relative to the end of the file. For example, |
| Reader(taskid, kind, 0, -1) is the entire file and |
| Reader(taskid, kind, -4197, -1) is the last 4196 bytes. |
| @param taskid the id of the task to read the log file for |
| @param kind the kind of log to read |
| @param start the offset to read from (negative is relative to tail) |
| @param end the offset to read upto (negative is relative to tail) |
| @throws IOException]]> |
| </doc> |
| </constructor> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="buffer" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="available" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TaskLog.Reader --> |
| <!-- start class org.apache.hadoop.mapred.TaskLogAppender --> |
| <class name="TaskLogAppender" extends="org.apache.log4j.FileAppender" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="TaskLogAppender" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="activateOptions" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="append" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="event" type="org.apache.log4j.spi.LoggingEvent"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getTaskId" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Getter/Setter methods for log4j.]]> |
| </doc> |
| </method> |
| <method name="setTaskId" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskId" type="java.lang.String"/> |
| </method> |
| <method name="getTotalLogFileSize" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setTotalLogFileSize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="logSize" type="long"/> |
| </method> |
| <doc> |
| <![CDATA[A simple log4j-appender for the task child's |
| map-reduce system logs.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TaskLogAppender --> |
| <!-- start class org.apache.hadoop.mapred.TaskLogServlet --> |
| <class name="TaskLogServlet" extends="javax.servlet.http.HttpServlet" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="TaskLogServlet" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doGet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="response" type="javax.servlet.http.HttpServletResponse"/> |
| <exception name="ServletException" type="javax.servlet.ServletException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the logs via http.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A servlet that is run by the TaskTrackers to provide the task logs via http.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TaskLogServlet --> |
| <!-- start class org.apache.hadoop.mapred.TaskReport --> |
| <class name="TaskReport" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="TaskReport" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getTaskId" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The id of the task.]]> |
| </doc> |
| </method> |
| <method name="getProgress" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The amount completed, between zero and one.]]> |
| </doc> |
| </method> |
| <method name="getState" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The most recent state, reported by a {@link Reporter}.]]> |
| </doc> |
| </method> |
| <method name="getDiagnostics" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[A list of error messages.]]> |
| </doc> |
| </method> |
| <method name="getCounters" return="org.apache.hadoop.mapred.Counters" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[A table of counters.]]> |
| </doc> |
| </method> |
| <method name="getFinishTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get finish time of task. |
| @return 0, if finish time was not set else returns finish time.]]> |
| </doc> |
| </method> |
| <method name="getStartTime" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get start time of task. |
| @return 0 if start time was not set, else start time.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A report on the state of a task.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TaskReport --> |
| <!-- start class org.apache.hadoop.mapred.TaskTracker --> |
| <class name="TaskTracker" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.MRConstants"/> |
| <implements name="org.apache.hadoop.mapred.TaskUmbilicalProtocol"/> |
| <implements name="java.lang.Runnable"/> |
| <constructor name="TaskTracker" type="org.apache.hadoop.mapred.JobConf" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Start with the local machine name, and the default JobTracker]]> |
| </doc> |
| </constructor> |
| <method name="getTaskTrackerMetrics" return="org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getProtocolVersion" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="protocol" type="java.lang.String"/> |
| <param name="clientVersion" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="cleanupStorage" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Removes all contents of temporary storage. Called upon |
| startup, to remove any leftovers from previous run.]]> |
| </doc> |
| </method> |
| <method name="shutdown" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Close down the TaskTracker and all its components. We must also shutdown |
| any running tasks or threads, and cleanup disk space. A new TaskTracker |
| within the same process space might be restarted, so everything must be |
| clean.]]> |
| </doc> |
| </method> |
| <method name="getJobClient" return="org.apache.hadoop.mapred.InterTrackerProtocol" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The connection to the JobTracker, used by the TaskRunner |
| for locating remote files.]]> |
| </doc> |
| </method> |
| <method name="getTaskTrackerReportAddress" return="java.net.InetSocketAddress" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[Return the port to which the tasktracker is bound]]>
| </doc> |
| </method> |
| <method name="run" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The server retry loop. |
| This while-loop attempts to connect to the JobTracker. It only |
| loops when the old TaskTracker has gone bad (its state is |
| stale somehow) and we need to reinitialize everything.]]> |
| </doc> |
| </method> |
| <method name="getTask" return="org.apache.hadoop.mapred.Task" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskid" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Called upon startup by the child process, to fetch Task data.]]> |
| </doc> |
| </method> |
| <method name="statusUpdate" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskid" type="java.lang.String"/> |
| <param name="taskStatus" type="org.apache.hadoop.mapred.TaskStatus"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Called periodically to report Task progress, from 0.0 to 1.0.]]> |
| </doc> |
| </method> |
| <method name="reportDiagnosticInfo" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskid" type="java.lang.String"/> |
| <param name="info" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Called when the task dies before completion, and we want to report back |
| diagnostic info]]> |
| </doc> |
| </method> |
| <method name="ping" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskid" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Child checking to see if we're alive. Normally does nothing.]]> |
| </doc> |
| </method> |
| <method name="done" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskid" type="java.lang.String"/> |
| <param name="shouldPromote" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The task is done.]]> |
| </doc> |
| </method> |
| <method name="shuffleError" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="message" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[A reduce-task failed to shuffle the map-outputs. Kill the task.]]> |
| </doc> |
| </method> |
| <method name="fsError" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskId" type="java.lang.String"/> |
| <param name="message" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[A child task had a local filesystem error. Kill the task.]]> |
| </doc> |
| </method> |
| <method name="getMapCompletionEvents" return="org.apache.hadoop.mapred.TaskCompletionEvent[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobId" type="java.lang.String"/> |
| <param name="fromEventId" type="int"/> |
| <param name="maxLocs" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="mapOutputLost" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="taskid" type="java.lang.String"/> |
| <param name="errorMsg" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[A completed map task's output has been lost.]]> |
| </doc> |
| </method> |
| <method name="isIdle" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Is this task tracker idle? |
| @return has this task tracker finished and cleaned up all of its tasks?]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="argv" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[Start the TaskTracker, point toward the indicated JobTracker]]> |
| </doc> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[TaskTracker is a process that starts and tracks MR Tasks |
| in a networked environment. It contacts the JobTracker |
| for Task assignments and reporting results.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TaskTracker --> |
| <!-- start class org.apache.hadoop.mapred.TaskTracker.Child --> |
| <class name="TaskTracker.Child" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="TaskTracker.Child" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Throwable" type="java.lang.Throwable"/> |
| </method> |
| <doc> |
| <![CDATA[The main() for child processes.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TaskTracker.Child --> |
| <!-- start class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet --> |
| <class name="TaskTracker.MapOutputServlet" extends="javax.servlet.http.HttpServlet" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="TaskTracker.MapOutputServlet" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doGet" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.http.HttpServletRequest"/> |
| <param name="response" type="javax.servlet.http.HttpServletResponse"/> |
| <exception name="ServletException" type="javax.servlet.ServletException"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[This class is used in TaskTracker's Jetty to serve the map outputs |
| to other nodes.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TaskTracker.MapOutputServlet --> |
| <!-- start class org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics --> |
| <class name="TaskTracker.TaskTrackerMetrics" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.metrics.Updater"/> |
| <method name="doUpdates" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/> |
| <doc> |
| <![CDATA[Since this object is a registered updater, this method will be called |
| periodically, e.g. every 5 seconds.]]> |
| </doc> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TaskTracker.TaskTrackerMetrics --> |
| <!-- start class org.apache.hadoop.mapred.TextInputFormat --> |
| <class name="TextInputFormat" extends="org.apache.hadoop.mapred.FileInputFormat<org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.JobConfigurable"/> |
| <constructor name="TextInputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| </method> |
| <method name="isSplitable" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="file" type="org.apache.hadoop.fs.Path"/> |
| </method> |
| <method name="getRecordReader" return="org.apache.hadoop.mapred.RecordReader<org.apache.hadoop.io.LongWritable, org.apache.hadoop.io.Text>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="genericSplit" type="org.apache.hadoop.mapred.InputSplit"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[An {@link InputFormat} for plain text files. Files are broken into lines. |
| Either linefeed or carriage-return are used to signal end of line. Keys are |
the position in the file, and values are the line of text.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TextInputFormat --> |
| <!-- start class org.apache.hadoop.mapred.TextOutputFormat --> |
| <class name="TextOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat<K, V>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="TextOutputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter<K, V>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="name" type="java.lang.String"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[An {@link OutputFormat} that writes plain text files.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TextOutputFormat --> |
| <!-- start class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter --> |
| <class name="TextOutputFormat.LineRecordWriter" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.RecordWriter<K, V>"/> |
| <constructor name="TextOutputFormat.LineRecordWriter" type="java.io.DataOutputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="write" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K"/> |
| <param name="value" type="V"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.TextOutputFormat.LineRecordWriter --> |
| <doc> |
| <![CDATA[<p>A software framework for easily writing applications which process vast |
| amounts of data (multi-terabyte data-sets) parallelly on large clusters |
| (thousands of nodes) built of commodity hardware in a reliable, fault-tolerant |
| manner.</p> |
| |
| <p>A Map-Reduce <i>job</i> usually splits the input data-set into independent |
chunks which are processed by <i>map</i> tasks in a completely parallel manner,
followed by <i>reduce</i> tasks which aggregate their output. Typically both
| the input and the output of the job are stored in a |
| {@link org.apache.hadoop.fs.FileSystem}. The framework takes care of monitoring |
| tasks and re-executing failed ones. Since, usually, the compute nodes and the |
| storage nodes are the same i.e. Hadoop's Map-Reduce framework and Distributed |
| FileSystem are running on the same set of nodes, tasks are effectively scheduled |
| on the nodes where data is already present, resulting in very high aggregate |
| bandwidth across the cluster.</p> |
| |
| <p>The Map-Reduce framework operates exclusively on <tt><key, value></tt> |
| pairs i.e. the input to the job is viewed as a set of <tt><key, value></tt> |
| pairs and the output as another, possibly different, set of |
| <tt><key, value></tt> pairs. The <tt>key</tt>s and <tt>value</tt>s have to |
| be serializable as {@link org.apache.hadoop.io.Writable}s and additionally the |
| <tt>key</tt>s have to be {@link org.apache.hadoop.io.WritableComparable}s in |
| order to facilitate grouping by the framework.</p> |
| |
| <p>Data flow:</p> |
| <pre> |
| (input) |
| <tt><k1, v1></tt> |
| |
| | |
| V |
| |
| <b>map</b> |
| |
| | |
| V |
| |
| <tt><k2, v2></tt> |
| |
| | |
| V |
| |
| <b>combine</b> |
| |
| | |
| V |
| |
| <tt><k2, v2></tt> |
| |
| | |
| V |
| |
| <b>reduce</b> |
| |
| | |
| V |
| |
| <tt><k3, v3></tt> |
| (output) |
| </pre> |
| |
| <p>Applications typically implement |
| {@link org.apache.hadoop.mapred.Mapper#map(Object, Object, OutputCollector, Reporter)} |
| and |
| {@link org.apache.hadoop.mapred.Reducer#reduce(Object, Iterator, OutputCollector, Reporter)} |
| methods. The application-writer also specifies various facets of the job such |
| as input and output locations, the <tt>Partitioner</tt>, <tt>InputFormat</tt> |
| & <tt>OutputFormat</tt> implementations to be used etc. as |
| a {@link org.apache.hadoop.mapred.JobConf}. The client program, |
| {@link org.apache.hadoop.mapred.JobClient}, then submits the job to the framework |
| and optionally monitors it.</p> |
| |
| <p>The framework spawns one map task per |
| {@link org.apache.hadoop.mapred.InputSplit} generated by the |
| {@link org.apache.hadoop.mapred.InputFormat} of the job and calls |
| {@link org.apache.hadoop.mapred.Mapper#map(Object, Object, OutputCollector, Reporter)} |
| with each <key, value> pair read by the |
| {@link org.apache.hadoop.mapred.RecordReader} from the <tt>InputSplit</tt> for |
| the task. The intermediate outputs of the maps are then grouped by <tt>key</tt>s |
| and optionally aggregated by <i>combiner</i>. The key space of intermediate |
outputs is partitioned by the {@link org.apache.hadoop.mapred.Partitioner}, where
| the number of partitions is exactly the number of reduce tasks for the job.</p> |
| |
| <p>The reduce tasks fetch the sorted intermediate outputs of the maps, via http, |
| merge the <key, value> pairs and call |
| {@link org.apache.hadoop.mapred.Reducer#reduce(Object, Iterator, OutputCollector, Reporter)} |
for each <key, list of values> pair. The output of the reduce tasks is
| stored on the <tt>FileSystem</tt> by the |
| {@link org.apache.hadoop.mapred.RecordWriter} provided by the |
| {@link org.apache.hadoop.mapred.OutputFormat} of the job.</p> |
| |
| <p>Map-Reduce application to perform a distributed <i>grep</i>:</p> |
| <pre><tt> |
| public class Grep extends Configured implements Tool { |
| |
| // <i>map: Search for the pattern specified by 'grep.mapper.regex' &</i> |
| // <i>'grep.mapper.regex.group'</i> |
| |
| class GrepMapper<K, Text> |
| extends MapReduceBase implements Mapper<K, Text, Text, LongWritable> { |
| |
| private Pattern pattern; |
| private int group; |
| |
| public void configure(JobConf job) { |
| pattern = Pattern.compile(job.get("grep.mapper.regex")); |
| group = job.getInt("grep.mapper.regex.group", 0); |
| } |
| |
| public void map(K key, Text value, |
| OutputCollector<Text, LongWritable> output, |
| Reporter reporter) |
| throws IOException { |
| String text = value.toString(); |
| Matcher matcher = pattern.matcher(text); |
| while (matcher.find()) { |
| output.collect(new Text(matcher.group(group)), new LongWritable(1)); |
| } |
| } |
| } |
| |
| // <i>reduce: Count the number of occurrences of the pattern</i> |
| |
| class GrepReducer<K> extends MapReduceBase |
| implements Reducer<K, LongWritable, K, LongWritable> { |
| |
| public void reduce(K key, Iterator<LongWritable> values, |
| OutputCollector<K, LongWritable> output, |
| Reporter reporter) |
| throws IOException { |
| |
| // sum all values for this key |
| long sum = 0; |
| while (values.hasNext()) { |
| sum += values.next().get(); |
| } |
| |
| // output sum |
| output.collect(key, new LongWritable(sum)); |
| } |
| } |
| |
| public int run(String[] args) throws Exception { |
| if (args.length < 3) { |
| System.out.println("Grep <inDir> <outDir> <regex> [<group>]"); |
| ToolRunner.printGenericCommandUsage(System.out); |
| return -1; |
| } |
| |
| JobConf grepJob = new JobConf(getConf(), Grep.class); |
| |
| grepJob.setJobName("grep"); |
| |
| grepJob.setInputPath(new Path(args[0])); |
| grepJob.setOutputPath(args[1]); |
| |
| grepJob.setMapperClass(GrepMapper.class); |
| grepJob.setCombinerClass(GrepReducer.class); |
| grepJob.setReducerClass(GrepReducer.class); |
| |
grepJob.set("grep.mapper.regex", args[2]);
if (args.length == 4)
grepJob.set("grep.mapper.regex.group", args[3]);
| |
| grepJob.setOutputFormat(SequenceFileOutputFormat.class); |
| grepJob.setOutputKeyClass(Text.class); |
| grepJob.setOutputValueClass(LongWritable.class); |
| |
| JobClient.runJob(grepJob); |
| |
| return 0; |
| } |
| |
| public static void main(String[] args) throws Exception { |
| int res = ToolRunner.run(new Configuration(), new Grep(), args); |
| System.exit(res); |
| } |
| |
| } |
| </tt></pre> |
| |
| <p>Notice how the data-flow of the above grep job is very similar to doing the |
| same via the unix pipeline:</p> |
| |
| <pre> |
| cat input/* | grep | sort | uniq -c > out |
| </pre> |
| |
| <pre> |
| input | map | shuffle | reduce > out |
| </pre> |
| |
| <p>Hadoop Map-Reduce applications need not be written in |
| Java<small><sup>TM</sup></small> only. |
| <a href="../streaming/package-summary.html">Hadoop Streaming</a> is a utility |
| which allows users to create and run jobs with any executables (e.g. shell |
| utilities) as the mapper and/or the reducer. |
| <a href="pipes/package-summary.html">Hadoop Pipes</a> is a |
| <a href="http://www.swig.org/">SWIG</a>-compatible <em>C++ API</em> to implement |
| Map-Reduce applications (non JNI<small><sup>TM</sup></small> based).</p> |
| |
| <p>See <a href="http://labs.google.com/papers/mapreduce.html">Google's original |
| Map/Reduce paper</a> for background information.</p> |
| |
| <p><i>Java and JNI are trademarks or registered trademarks of |
| Sun Microsystems, Inc. in the United States and other countries.</i></p>]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.mapred.jobcontrol"> |
| <!-- start class org.apache.hadoop.mapred.jobcontrol.Job --> |
| <class name="Job" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Job" type="org.apache.hadoop.mapred.JobConf, java.util.ArrayList<org.apache.hadoop.mapred.jobcontrol.Job>" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a job. |
| @param jobConf a mapred job configuration representing a job to be executed. |
| @param dependingJobs an array of jobs the current job depends on]]> |
| </doc> |
| </constructor> |
| <constructor name="Job" type="org.apache.hadoop.mapred.JobConf" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a job. |
| |
| @param jobConf mapred job configuration representing a job to be executed. |
| @throws IOException]]> |
| </doc> |
| </constructor> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getJobName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the job name of this job]]> |
| </doc> |
| </method> |
| <method name="setJobName" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the job name for this job. |
| @param jobName the job name]]> |
| </doc> |
| </method> |
| <method name="getJobID" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the job ID of this job]]> |
| </doc> |
| </method> |
| <method name="setJobID" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="id" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the job ID for this job. |
| @param id the job ID]]> |
| </doc> |
| </method> |
| <method name="getMapredJobID" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the mapred ID of this job]]> |
| </doc> |
| </method> |
| <method name="setMapredJobID" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="mapredJobID" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the mapred ID for this job. |
| @param mapredJobID the mapred job ID for this job.]]> |
| </doc> |
| </method> |
| <method name="getJobConf" return="org.apache.hadoop.mapred.JobConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the mapred job conf of this job]]> |
| </doc> |
| </method> |
| <method name="setJobConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Set the mapred job conf for this job. |
| @param jobConf the mapred job conf for this job.]]> |
| </doc> |
| </method> |
| <method name="getState" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the state of this job]]> |
| </doc> |
| </method> |
| <method name="setState" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="state" type="int"/> |
| <doc> |
| <![CDATA[Set the state for this job. |
| @param state the new state for this job.]]> |
| </doc> |
| </method> |
| <method name="getMessage" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the message of this job]]> |
| </doc> |
| </method> |
| <method name="setMessage" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="message" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the message for this job. |
| @param message the message for this job.]]> |
| </doc> |
| </method> |
| <method name="getDependingJobs" return="java.util.ArrayList<org.apache.hadoop.mapred.jobcontrol.Job>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the depending jobs of this job]]> |
| </doc> |
| </method> |
| <method name="addDependingJob" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dependingJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/> |
| <doc> |
| <![CDATA[Add a job to this jobs' dependency list. Dependent jobs can only be added while a Job |
| is waiting to run, not during or afterwards. |
| |
| @param dependingJob Job that this Job depends on. |
| @return <tt>true</tt> if the Job was added.]]> |
| </doc> |
| </method> |
| <method name="isCompleted" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return true if this job is in a complete state]]> |
| </doc> |
| </method> |
| <method name="isReady" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return true if this job is in READY state]]> |
| </doc> |
| </method> |
| <method name="submit" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Submit this job to mapred. The state becomes RUNNING if submission |
| is successful, FAILED otherwise.]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <doc> |
| <![CDATA[@param args]]> |
| </doc> |
| </method> |
| <field name="SUCCESS" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="WAITING" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="RUNNING" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="READY" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="FAILED" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DEPENDENT_FAILED" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[This class encapsulates a MapReduce job and its dependency. It monitors |
| the states of the depending jobs and updates the state of this job. |
A job starts in the WAITING state. If it does not have any depending jobs, or
| all of the depending jobs are in SUCCESS state, then the job state will become |
| READY. If any depending jobs fail, the job will fail too. |
| When in READY state, the job can be submitted to Hadoop for execution, with |
| the state changing into RUNNING state. From RUNNING state, the job can get into |
SUCCESS or FAILED state, depending on the status of the job execution.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.jobcontrol.Job --> |
| <!-- start class org.apache.hadoop.mapred.jobcontrol.JobControl --> |
| <class name="JobControl" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.lang.Runnable"/> |
| <constructor name="JobControl" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a job control for a group of jobs. |
| @param groupName a name identifying this group]]> |
| </doc> |
| </constructor> |
| <method name="getWaitingJobs" return="java.util.ArrayList<org.apache.hadoop.mapred.jobcontrol.Job>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the jobs in the waiting state]]> |
| </doc> |
| </method> |
| <method name="getRunningJobs" return="java.util.ArrayList<org.apache.hadoop.mapred.jobcontrol.Job>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the jobs in the running state]]> |
| </doc> |
| </method> |
| <method name="getReadyJobs" return="java.util.ArrayList<org.apache.hadoop.mapred.jobcontrol.Job>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the jobs in the ready state]]> |
| </doc> |
| </method> |
| <method name="getSuccessfulJobs" return="java.util.ArrayList<org.apache.hadoop.mapred.jobcontrol.Job>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the jobs in the success state]]> |
| </doc> |
| </method> |
| <method name="getFailedJobs" return="java.util.ArrayList<org.apache.hadoop.mapred.jobcontrol.Job>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="addJob" return="java.lang.String" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="aJob" type="org.apache.hadoop.mapred.jobcontrol.Job"/> |
| <doc> |
| <![CDATA[Add a new job. |
| @param aJob the new job]]> |
| </doc> |
| </method> |
| <method name="addJobs" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobs" type="java.util.Collection<org.apache.hadoop.mapred.jobcontrol.Job>"/> |
| <doc> |
| <![CDATA[Add a collection of jobs |
| |
| @param jobs]]> |
| </doc> |
| </method> |
| <method name="getState" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the thread state]]> |
| </doc> |
| </method> |
| <method name="stop" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[set the thread state to STOPPING so that the |
| thread will stop when it wakes up.]]> |
| </doc> |
| </method> |
| <method name="suspend" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[suspend the running thread]]> |
| </doc> |
| </method> |
| <method name="resume" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[resume the suspended thread]]> |
| </doc> |
| </method> |
| <method name="allFinished" return="boolean" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="run" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The main loop for the thread. |
| The loop does the following: |
| Check the states of the running jobs |
| Update the states of waiting jobs |
| Submit the jobs in ready state]]> |
| </doc> |
| </method> |
| <doc> |
<![CDATA[This class encapsulates a set of MapReduce jobs and their dependencies. It tracks
| the states of the jobs by placing them into different tables according to their |
| states. |
| |
| This class provides APIs for the client app to add a job to the group and to get |
| the jobs in the group in different states. When a |
| job is added, an ID unique to the group is assigned to the job. |
| |
| This class has a thread that submits jobs when they become ready, monitors the |
| states of the running jobs, and updates the states of jobs based on the state changes |
| of their depending jobs states. The class provides APIs for suspending/resuming |
the thread, and for stopping the thread.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.jobcontrol.JobControl --> |
| <doc> |
| <![CDATA[<p>Utilities for managing dependent jobs.</p>]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.mapred.join"> |
| <!-- start class org.apache.hadoop.mapred.join.ArrayListBackedIterator --> |
| <class name="ArrayListBackedIterator" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.join.ResetableIterator<X>"/> |
| <constructor name="ArrayListBackedIterator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="ArrayListBackedIterator" type="java.util.ArrayList<X>" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="hasNext" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="X extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="replay" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="X extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="add" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="item" type="X extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="clear" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[This class provides an implementation of ResetableIterator. The |
| implementation uses an {@link java.util.ArrayList} to store elements |
| added to it, replaying them as requested. |
| Prefer {@link StreamBackedIterator}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.ArrayListBackedIterator --> |
| <!-- start interface org.apache.hadoop.mapred.join.ComposableInputFormat --> |
| <interface name="ComposableInputFormat" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.InputFormat<K, V>"/> |
| <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader<K, V>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="split" type="org.apache.hadoop.mapred.InputSplit"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[Refinement of InputFormat requiring implementors to provide |
| ComposableRecordReader instead of RecordReader.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.join.ComposableInputFormat --> |
| <!-- start interface org.apache.hadoop.mapred.join.ComposableRecordReader --> |
| <interface name="ComposableRecordReader" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.RecordReader<K, V>"/> |
| <implements name="java.lang.Comparable<org.apache.hadoop.mapred.join.ComposableRecordReader<K, ?>>"/> |
| <method name="id" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the position in the collector this class occupies.]]> |
| </doc> |
| </method> |
| <method name="key" return="K extends org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the key this RecordReader would supply on a call to next(K,V)]]> |
| </doc> |
| </method> |
| <method name="key" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Clone the key at the head of this RecordReader into the object provided.]]> |
| </doc> |
| </method> |
| <method name="hasNext" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns true if the stream is not empty, but provides no guarantee that |
| a call to next(K,V) will succeed.]]> |
| </doc> |
| </method> |
| <method name="skip" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]> |
| </doc> |
| </method> |
| <method name="accept" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[While key-value pairs from this RecordReader match the given key, register |
| them with the JoinCollector provided.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Additional operations required of a RecordReader to participate in a join.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.join.ComposableRecordReader --> |
| <!-- start class org.apache.hadoop.mapred.join.CompositeInputFormat --> |
| <class name="CompositeInputFormat" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat<K, org.apache.hadoop.mapred.join.TupleWritable>"/> |
| <constructor name="CompositeInputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setFormat" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Interpret a given string as a composite expression. |
| {@code |
| func ::= <ident>([<func>,]*<func>) |
| func ::= tbl(<class>,"<path>") |
| class ::= @see java.lang.Class#forName(java.lang.String) |
| path ::= @see org.apache.hadoop.fs.Path#Path(java.lang.String) |
| } |
| Reads expression from the <tt>mapred.join.expr</tt> property and |
| user-supplied join types from <tt>mapred.join.define.<ident></tt> |
| types. Paths supplied to <tt>tbl</tt> are given as input paths to the |
| InputFormat class listed. |
| @see #compose(java.lang.String, java.lang.Class, java.lang.String...)]]> |
| </doc> |
| </method> |
| <method name="addDefaults" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Adds the default set of identifiers to the parser.]]> |
| </doc> |
| </method> |
| <method name="validateInput" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Verify that this composite has children and that all its children |
| can validate their input.]]> |
| </doc> |
| </method> |
| <method name="getSplits" return="org.apache.hadoop.mapred.InputSplit[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="numSplits" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Build a CompositeInputSplit from the child InputFormats by assigning the |
| ith split from each child to the ith composite split.]]> |
| </doc> |
| </method> |
| <method name="getRecordReader" return="org.apache.hadoop.mapred.join.ComposableRecordReader<K, org.apache.hadoop.mapred.join.TupleWritable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="split" type="org.apache.hadoop.mapred.InputSplit"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Construct a CompositeRecordReader for the children of this InputFormat |
| as defined in the init expression. |
| The outermost join need only be composable, not necessarily a composite. |
| Mandating TupleWritable isn't strictly correct.]]> |
| </doc> |
| </method> |
| <method name="compose" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="inf" type="java.lang.Class<? extends org.apache.hadoop.mapred.InputFormat>"/> |
| <param name="path" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Convenience method for constructing composite formats. |
| Given InputFormat class (inf), path (p) return: |
| {@code tbl(<inf>, <p>) }]]> |
| </doc> |
| </method> |
| <method name="compose" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="op" type="java.lang.String"/> |
| <param name="inf" type="java.lang.Class<? extends org.apache.hadoop.mapred.InputFormat>"/> |
| <param name="path" type="java.lang.String[]"/> |
| <doc> |
| <![CDATA[Convenience method for constructing composite formats. |
| Given operation (op), Object class (inf), set of paths (p) return: |
| {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]> |
| </doc> |
| </method> |
| <method name="compose" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="op" type="java.lang.String"/> |
| <param name="inf" type="java.lang.Class<? extends org.apache.hadoop.mapred.InputFormat>"/> |
| <param name="path" type="org.apache.hadoop.fs.Path[]"/> |
| <doc> |
| <![CDATA[Convenience method for constructing composite formats. |
| Given operation (op), Object class (inf), set of paths (p) return: |
| {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[An InputFormat capable of performing joins over a set of data sources sorted |
| and partitioned the same way. |
| @see #setFormat |
| |
| A user may define new join types by setting the property |
| <tt>mapred.join.define.<ident></tt> to a classname. In the expression |
| <tt>mapred.join.expr</tt>, the identifier will be assumed to be a |
| ComposableRecordReader. |
| <tt>mapred.join.keycomparator</tt> can be a classname used to compare keys |
| in the join. |
| @see JoinRecordReader |
| @see MultiFilterRecordReader]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.CompositeInputFormat --> |
| <!-- start class org.apache.hadoop.mapred.join.CompositeInputSplit --> |
| <class name="CompositeInputSplit" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.InputSplit"/> |
| <constructor name="CompositeInputSplit" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="CompositeInputSplit" type="int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="add" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="s" type="org.apache.hadoop.mapred.InputSplit"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Add an InputSplit to this collection. |
| @throws IOException If capacity was not specified during construction |
| or if capacity has been reached.]]> |
| </doc> |
| </method> |
| <method name="get" return="org.apache.hadoop.mapred.InputSplit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <doc> |
| <![CDATA[Get ith child InputSplit.]]> |
| </doc> |
| </method> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Return the aggregate length of all child InputSplits currently added.]]> |
| </doc> |
| </method> |
| <method name="getLength" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Get the length of ith child InputSplit.]]> |
| </doc> |
| </method> |
| <method name="getLocations" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Collect a set of hosts from all child InputSplits.]]> |
| </doc> |
| </method> |
| <method name="getLocation" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[getLocations from ith InputSplit.]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write splits in the following format. |
| {@code |
| <count><class1><class2>...<classn><split1><split2>...<splitn> |
| }]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc} |
| @throws IOException If the child InputSplit cannot be read, typically |
for failing access checks.]]>
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This InputSplit contains a set of child InputSplits. Any InputSplit inserted |
| into this collection must have a public default constructor.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.CompositeInputSplit --> |
| <!-- start class org.apache.hadoop.mapred.join.CompositeRecordReader --> |
| <class name="CompositeRecordReader" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.conf.Configurable"/> |
| <constructor name="CompositeRecordReader" type="int, int, java.lang.Class<? extends org.apache.hadoop.io.WritableComparator>" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a RecordReader with <tt>capacity</tt> children to position |
| <tt>id</tt> in the parent reader. |
| The id of a root CompositeRecordReader is -1 by convention, but relying |
| on this is not recommended.]]> |
| </doc> |
| </constructor> |
| <method name="combine" return="boolean" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="srcs" type="java.lang.Object[]"/> |
| <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/> |
| </method> |
| <method name="id" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the position in the collector this class occupies.]]> |
| </doc> |
| </method> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="getRecordReaderQueue" return="java.util.PriorityQueue<org.apache.hadoop.mapred.join.ComposableRecordReader<K, ?>>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return sorted list of RecordReaders for this composite.]]> |
| </doc> |
| </method> |
| <method name="getComparator" return="org.apache.hadoop.io.WritableComparator" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return comparator defining the ordering for RecordReaders in this |
| composite.]]> |
| </doc> |
| </method> |
| <method name="add" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="rr" type="org.apache.hadoop.mapred.join.ComposableRecordReader<K, ? extends V>"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Add a RecordReader to this collection. |
| The id() of a RecordReader determines where in the Tuple its |
| entry will appear. Adding RecordReaders with the same id has |
| undefined behavior.]]> |
| </doc> |
| </method> |
| <method name="key" return="K extends org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the key for the current join or the value at the top of the |
| RecordReader heap.]]> |
| </doc> |
| </method> |
| <method name="key" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Clone the key at the top of this RR into the given object.]]> |
| </doc> |
| </method> |
| <method name="hasNext" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return true if it is possible that this could emit more values.]]> |
| </doc> |
| </method> |
| <method name="skip" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Pass skip key to child RRs.]]> |
| </doc> |
| </method> |
| <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator<X>" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Obtain an iterator over the child RRs apropos of the value type |
| ultimately emitted from this join.]]> |
| </doc> |
| </method> |
| <method name="accept" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[If key provided matches that of this Composite, give JoinCollector |
| iterator over values it may emit.]]> |
| </doc> |
| </method> |
| <method name="fillJoinCollector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[For all child RRs offering the key provided, obtain an iterator |
| at that position in the JoinCollector.]]> |
| </doc> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader<K, ?>"/> |
| <doc> |
| <![CDATA[Implement Comparable contract (compare key of join or head of heap |
| with that of another).]]> |
| </doc> |
| </method> |
| <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a new key value common to all child RRs. |
| @throws ClassCastException if key classes differ.]]> |
| </doc> |
| </method> |
| <method name="createInternalValue" return="org.apache.hadoop.mapred.join.TupleWritable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a value to be used internally for joins.]]> |
| </doc> |
| </method> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Unsupported (returns zero in all cases).]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Close all child RRs.]]> |
| </doc> |
| </method> |
| <method name="getProgress" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Report progress as the minimum of all child RR progress.]]> |
| </doc> |
| </method> |
| <field name="jc" type="org.apache.hadoop.mapred.join.CompositeRecordReader<K, V, X>.JoinCollector" |
| transient="false" volatile="false" |
| static="false" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="kids" type="org.apache.hadoop.mapred.join.ComposableRecordReader[]" |
| transient="false" volatile="false" |
| static="false" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[A RecordReader that can effect joins of RecordReaders sharing a common key |
| type and partitioning.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.CompositeRecordReader --> |
| <!-- start class org.apache.hadoop.mapred.join.InnerJoinRecordReader --> |
| <class name="InnerJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader<K>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="combine" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="srcs" type="java.lang.Object[]"/> |
| <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/> |
| <doc> |
| <![CDATA[Return true iff the tuple is full (all data sources contain this key).]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Full inner join.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.InnerJoinRecordReader --> |
| <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader --> |
| <class name="JoinRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader<K, org.apache.hadoop.io.Writable, org.apache.hadoop.mapred.join.TupleWritable>" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader<K, org.apache.hadoop.mapred.join.TupleWritable>"/> |
| <constructor name="JoinRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class<? extends org.apache.hadoop.io.WritableComparator>" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <param name="value" type="org.apache.hadoop.mapred.join.TupleWritable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Emit the next set of key, value pairs as defined by the child |
| RecordReaders and operation associated with this composite RR.]]> |
| </doc> |
| </method> |
| <method name="createValue" return="org.apache.hadoop.mapred.join.TupleWritable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator<org.apache.hadoop.mapred.join.TupleWritable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return an iterator wrapping the JoinCollector.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Base class for Composite joins returning Tuples of arbitrary Writables.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader --> |
| <!-- start class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator --> |
| <class name="JoinRecordReader.JoinDelegationIterator" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
<implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;org.apache.hadoop.mapred.join.TupleWritable&gt;"/>
| <constructor name="JoinRecordReader.JoinDelegationIterator" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="hasNext" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="replay" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="org.apache.hadoop.mapred.join.TupleWritable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="add" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="item" type="org.apache.hadoop.mapred.join.TupleWritable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="clear" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[Since the JoinCollector is effecting our operation, we need only |
| provide an iterator proxy wrapping its operation.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.JoinRecordReader.JoinDelegationIterator --> |
| <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader --> |
<class name="MultiFilterRecordReader" extends="org.apache.hadoop.mapred.join.CompositeRecordReader&lt;K, V, V&gt;"
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
<implements name="org.apache.hadoop.mapred.join.ComposableRecordReader&lt;K, V&gt;"/>
<constructor name="MultiFilterRecordReader" type="int, org.apache.hadoop.mapred.JobConf, int, java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="emit" return="V extends org.apache.hadoop.io.Writable" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[For each tuple emitted, return a value (typically one of the values |
| in the tuple). |
| Modifying the Writables in the tuple is permitted and unlikely to affect |
| join behavior in most cases, but it is not recommended. It's safer to |
| clone first.]]> |
| </doc> |
| </method> |
| <method name="combine" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="srcs" type="java.lang.Object[]"/> |
| <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/> |
| <doc> |
| <![CDATA[Default implementation offers {@link #emit} every Tuple from the |
| collector (the outer join of child RRs).]]> |
| </doc> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <param name="value" type="V extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="createValue" return="V extends org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
<method name="getDelegate" return="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return an iterator returning a single value from the tuple. |
| @see MultiFilterDelegationIterator]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Base class for Composite join returning values derived from multiple |
| sources, but generally not tuples.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader --> |
| <!-- start class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator --> |
| <class name="MultiFilterRecordReader.MultiFilterDelegationIterator" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
<implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;V&gt;"/>
| <constructor name="MultiFilterRecordReader.MultiFilterDelegationIterator" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="hasNext" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="V extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="replay" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="V extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="add" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="item" type="V extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="clear" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[Proxy the JoinCollector, but include callback to emit.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.MultiFilterRecordReader.MultiFilterDelegationIterator --> |
| <!-- start class org.apache.hadoop.mapred.join.OuterJoinRecordReader --> |
<class name="OuterJoinRecordReader" extends="org.apache.hadoop.mapred.join.JoinRecordReader&lt;K&gt;"
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="combine" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="srcs" type="java.lang.Object[]"/> |
| <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/> |
| <doc> |
| <![CDATA[Emit everything from the collector.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Full outer join.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.OuterJoinRecordReader --> |
| <!-- start class org.apache.hadoop.mapred.join.OverrideRecordReader --> |
<class name="OverrideRecordReader" extends="org.apache.hadoop.mapred.join.MultiFilterRecordReader&lt;K, V&gt;"
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="emit" return="V extends org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="dst" type="org.apache.hadoop.mapred.join.TupleWritable"/> |
| <doc> |
| <![CDATA[Emit the value with the highest position in the tuple.]]> |
| </doc> |
| </method> |
| <method name="fillJoinCollector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="iterkey" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Instead of filling the JoinCollector with iterators from all |
| data sources, fill only the rightmost for this key. |
| This not only saves space by discarding the other sources, but |
| it also emits the number of key-value pairs in the preferred |
| RecordReader instead of repeating that stream n times, where |
| n is the cardinality of the cross product of the discarded |
| streams for the given key.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Prefer the "rightmost" data source for this key. |
| For example, <tt>override(S1,S2,S3)</tt> will prefer values |
| from S3 over S2, and values from S2 over S1 for all keys |
| emitted from all sources.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.OverrideRecordReader --> |
| <!-- start class org.apache.hadoop.mapred.join.Parser --> |
| <class name="Parser" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Parser" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[Very simple shift-reduce parser for join expressions. |
| |
| This should be sufficient for the user extension permitted now, but ought to |
| be replaced with a parser generator if more complex grammars are supported. |
| In particular, this "shift-reduce" parser has no states. Each set |
| of formals requires a different internal node type, which is responsible for |
| interpreting the list of tokens it receives. This is sufficient for the |
| current grammar, but it has several annoying properties that might inhibit |
extension. In particular, parentheses are always function calls; an
| algebraic or filter grammar would not only require a node type, but must |
| also work around the internals of this parser. |
| |
| For most other cases, adding classes to the hierarchy- particularly by |
| extending JoinRecordReader and MultiFilterRecordReader- is fairly |
| straightforward. One need only override the relevant method(s) (usually only |
| {@link CompositeRecordReader#combine}) and include a property to map its |
| value to an identifier in the parser.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.Parser --> |
| <!-- start class org.apache.hadoop.mapred.join.Parser.Node --> |
| <class name="Parser.Node" extends="java.lang.Object" |
| abstract="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.join.ComposableInputFormat"/> |
| <constructor name="Parser.Node" type="java.lang.String" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="addIdentifier" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="ident" type="java.lang.String"/> |
| <param name="mcstrSig" type="java.lang.Class[]"/> |
<param name="nodetype" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.Parser.Node&gt;"/>
<param name="cl" type="java.lang.Class&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;"/>
| <exception name="NoSuchMethodException" type="java.lang.NoSuchMethodException"/> |
| <doc> |
| <![CDATA[For a given identifier, add a mapping to the nodetype for the parse |
| tree and to the ComposableRecordReader to be created, including the |
| formals required to invoke the constructor. |
| The nodetype and constructor signature should be filled in from the |
| child node.]]> |
| </doc> |
| </method> |
| <method name="setID" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="id" type="int"/> |
| </method> |
| <method name="setKeyComparator" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
<param name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"/>
| </method> |
<field name="rrCstrMap" type="java.util.Map&lt;java.lang.String, java.lang.reflect.Constructor&lt;? extends org.apache.hadoop.mapred.join.ComposableRecordReader&gt;&gt;"
| transient="false" volatile="false" |
| static="true" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="id" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="ident" type="java.lang.String" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
<field name="cmpcl" type="java.lang.Class&lt;? extends org.apache.hadoop.io.WritableComparator&gt;"
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.Parser.Node --> |
| <!-- start class org.apache.hadoop.mapred.join.Parser.NodeToken --> |
| <class name="Parser.NodeToken" extends="org.apache.hadoop.mapred.join.Parser.Token" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.Parser.NodeToken --> |
| <!-- start class org.apache.hadoop.mapred.join.Parser.NumToken --> |
| <class name="Parser.NumToken" extends="org.apache.hadoop.mapred.join.Parser.Token" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Parser.NumToken" type="double" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getNum" return="double" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.Parser.NumToken --> |
| <!-- start class org.apache.hadoop.mapred.join.Parser.StrToken --> |
| <class name="Parser.StrToken" extends="org.apache.hadoop.mapred.join.Parser.Token" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Parser.StrToken" type="org.apache.hadoop.mapred.join.Parser.TType, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getStr" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.Parser.StrToken --> |
| <!-- start class org.apache.hadoop.mapred.join.Parser.Token --> |
| <class name="Parser.Token" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getType" return="org.apache.hadoop.mapred.join.Parser.TType" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getNode" return="org.apache.hadoop.mapred.join.Parser.Node" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getNum" return="double" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getStr" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[Tagged-union type for tokens from the join expression. |
| @see Parser.TType]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.Parser.Token --> |
| <!-- start class org.apache.hadoop.mapred.join.Parser.TType --> |
<class name="Parser.TType" extends="java.lang.Enum&lt;org.apache.hadoop.mapred.join.Parser.TType&gt;"
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <method name="values" return="org.apache.hadoop.mapred.join.Parser.TType[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="valueOf" return="org.apache.hadoop.mapred.join.Parser.TType" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.Parser.TType --> |
| <!-- start interface org.apache.hadoop.mapred.join.ResetableIterator --> |
| <interface name="ResetableIterator" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="hasNext" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[True iff a call to next will succeed.]]> |
| </doc> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="T extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Assign next value to actual. |
| It is required that elements added to a ResetableIterator be returned in |
| the same order after a call to {@link #reset} (FIFO). |
| |
| Note that a call to this may fail for nested joins (i.e. more elements |
| available, but none satisfying the constraints of the join)]]> |
| </doc> |
| </method> |
| <method name="replay" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="T extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Assign last value returned to actual.]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Set iterator to return to the start of its range. Must be called after |
| calling {@link #add} to avoid a ConcurrentModificationException.]]> |
| </doc> |
| </method> |
| <method name="add" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="item" type="T extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Add an element to the collection of elements to iterate over.]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Close datasources and release resources. Calling methods on the iterator |
| after calling close has undefined behavior.]]> |
| </doc> |
| </method> |
| <method name="clear" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Close datasources, but do not release internal resources. Calling this |
| method should permit the object to be reused with a different datasource.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This defines an interface to a stateful Iterator that can replay elements |
| added to it directly. |
| Note that this does not extend {@link java.util.Iterator}.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.join.ResetableIterator --> |
| <!-- start class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY --> |
| <class name="ResetableIterator.EMPTY" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
<implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;U&gt;"/>
| <constructor name="ResetableIterator.EMPTY" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="hasNext" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="clear" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="U extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="replay" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="U extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="add" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="item" type="U extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.ResetableIterator.EMPTY --> |
| <!-- start class org.apache.hadoop.mapred.join.StreamBackedIterator --> |
| <class name="StreamBackedIterator" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
<implements name="org.apache.hadoop.mapred.join.ResetableIterator&lt;X&gt;"/>
| <constructor name="StreamBackedIterator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="hasNext" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="X extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="replay" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="X extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="add" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="item" type="X extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="clear" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[This class provides an implementation of ResetableIterator. This |
| implementation uses a byte array to store elements added to it.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.StreamBackedIterator --> |
| <!-- start class org.apache.hadoop.mapred.join.TupleWritable --> |
| <class name="TupleWritable" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
<implements name="java.lang.Iterable&lt;org.apache.hadoop.io.Writable&gt;"/>
| <constructor name="TupleWritable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create an empty tuple with no allocated storage for writables.]]> |
| </doc> |
| </constructor> |
| <constructor name="TupleWritable" type="org.apache.hadoop.io.Writable[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Initialize tuple with storage; unknown whether any of them contain |
| "written" values.]]> |
| </doc> |
| </constructor> |
| <method name="has" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <doc> |
| <![CDATA[Return true if tuple has an element at the position provided.]]> |
| </doc> |
| </method> |
| <method name="get" return="org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <doc> |
| <![CDATA[Get ith Writable from Tuple.]]> |
| </doc> |
| </method> |
| <method name="size" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The number of children in this Tuple.]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
<method name="iterator" return="java.util.Iterator&lt;org.apache.hadoop.io.Writable&gt;"
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return an iterator over the elements in this tuple. |
| Note that this doesn't flatten the tuple; one may receive tuples |
| from this iterator.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Convert Tuple to String as in the following. |
| <tt>[<child1>,<child2>,...,<childn>]</tt>]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Writes each Writable to <code>out</code>. |
| TupleWritable format: |
| {@code |
| <count><type1><type2>...<typen><obj1><obj2>...<objn> |
| }]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Writable type storing multiple {@link org.apache.hadoop.io.Writable}s.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.TupleWritable --> |
| <!-- start class org.apache.hadoop.mapred.join.WrappedRecordReader --> |
| <class name="WrappedRecordReader" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.join.ComposableRecordReader<K, U>"/> |
| <method name="id" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[{@inheritDoc}]]> |
| </doc> |
| </method> |
| <method name="key" return="K extends org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the key at the head of this RR.]]> |
| </doc> |
| </method> |
| <method name="key" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="qkey" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Clone the key at the head of this RR into the object supplied.]]> |
| </doc> |
| </method> |
| <method name="hasNext" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return true if the RR- including the k,v pair stored in this object- |
| is exhausted.]]> |
| </doc> |
| </method> |
| <method name="skip" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Skip key-value pairs with keys less than or equal to the key provided.]]> |
| </doc> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read the next k,v pair into the head of this object; return true iff |
| the RR and this are exhausted.]]> |
| </doc> |
| </method> |
| <method name="accept" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="org.apache.hadoop.mapred.join.CompositeRecordReader.JoinCollector"/> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Add an iterator to the collector at the position occupied by this |
| RecordReader over the values in this stream paired with the key |
| provided (ie register a stream of values from this source matching K |
| with a collector).]]> |
| </doc> |
| </method> |
| <method name="next" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <param name="value" type="U extends org.apache.hadoop.io.Writable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write key-value pair at the head of this stream to the objects provided; |
| get next key-value pair from proxied RR.]]> |
| </doc> |
| </method> |
| <method name="createKey" return="K extends org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Request new key from proxied RR.]]> |
| </doc> |
| </method> |
| <method name="createValue" return="U extends org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Request new value from proxied RR.]]> |
| </doc> |
| </method> |
| <method name="getProgress" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Request progress from proxied RR.]]> |
| </doc> |
| </method> |
| <method name="getPos" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Request position from proxied RR.]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Forward close request to proxied RR.]]> |
| </doc> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="org.apache.hadoop.mapred.join.ComposableRecordReader<K, ?>"/> |
| <doc> |
| <![CDATA[Implement Comparable contract (compare key at head of proxied RR |
| with that of another).]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="java.lang.Object"/> |
| <doc> |
<![CDATA[Return true iff compareTo(other) returns 0.]]>
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[Proxy class for a RecordReader participating in the join framework. |
| This class keeps track of the "head" key-value pair for the |
| provided RecordReader and keeps a store of values matching a key when |
| this source is participating in a join.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.join.WrappedRecordReader --> |
| <doc> |
| <![CDATA[<p>Given a set of sorted datasets keyed with the same class and yielding equal |
| partitions, it is possible to effect a join of those datasets prior to the map. |
| This could save costs in re-partitioning, sorting, shuffling, and writing out |
| data required in the general case.</p> |
| |
| <h3><a name="Interface"></a>Interface</h3> |
| |
| <p>The attached code offers the following interface to users of these |
| classes.</p> |
| |
| <table> |
| <tr><th>property</th><th>required</th><th>value</th></tr> |
| <tr><td>mapred.join.expr</td><td>yes</td> |
| <td>Join expression to effect over input data</td></tr> |
| <tr><td>mapred.join.keycomparator</td><td>no</td> |
| <td><tt>WritableComparator</tt> class to use for comparing keys</td></tr> |
| <tr><td>mapred.join.define.<ident></td><td>no</td> |
| <td>Class mapped to identifier in join expression</td></tr> |
| </table> |
| |
| <p>The join expression understands the following grammar:</p> |
| |
| <pre>func ::= <ident>([<func>,]*<func>) |
| func ::= tbl(<class>,"<path>"); |
| |
| </pre> |
| |
| <p>Operations included in this patch are partitioned into one of two types: |
| join operations emitting tuples and "multi-filter" operations emitting a |
| single value from (but not necessarily included in) a set of input values. |
| For a given key, each operation will consider the cross product of all |
| values for all sources at that node.</p> |
| |
| <p>Identifiers supported by default:</p> |
| |
| <table> |
| <tr><th>identifier</th><th>type</th><th>description</th></tr> |
| <tr><td>inner</td><td>Join</td><td>Full inner join</td></tr> |
| <tr><td>outer</td><td>Join</td><td>Full outer join</td></tr> |
| <tr><td>override</td><td>MultiFilter</td> |
| <td>For a given key, prefer values from the rightmost source</td></tr> |
| </table> |
| |
| <p>A user of this class must set the <tt>InputFormat</tt> for the job to |
| <tt>CompositeInputFormat</tt> and define a join expression accepted by the |
| preceding grammar. For example, both of the following are acceptable:</p> |
| |
| <pre>inner(tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class, |
| "hdfs://host:8020/foo/bar"), |
| tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class, |
| "hdfs://host:8020/foo/baz")) |
| |
| outer(override(tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class, |
| "hdfs://host:8020/foo/bar"), |
| tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class, |
| "hdfs://host:8020/foo/baz")), |
tbl(org.apache.hadoop.mapred.SequenceFileInputFormat.class,
| "hdfs://host:8020/foo/rab")) |
| </pre> |
| |
| <p><tt>CompositeInputFormat</tt> includes a handful of convenience methods to |
| aid construction of these verbose statements.</p> |
| |
| <p>As in the second example, joins may be nested. Users may provide a |
| comparator class in the <tt>mapred.join.keycomparator</tt> property to specify |
| the ordering of their keys, or accept the default comparator as returned by |
| <tt>WritableComparator.get(keyclass)</tt>.</p> |
| |
| <p>Users can specify their own join operations, typically by overriding |
| <tt>JoinRecordReader</tt> or <tt>MultiFilterRecordReader</tt> and mapping that |
| class to an identifier in the join expression using the |
| <tt>mapred.join.define.<em>ident</em></tt> property, where <em>ident</em> is |
| the identifier appearing in the join expression. Users may elect to emit- or |
| modify- values passing through their join operation. Consulting the existing |
| operations for guidance is recommended. Adding arguments is considerably more |
| complex (and only partially supported), as one must also add a <tt>Node</tt> |
| type to the parse tree. One is probably better off extending |
| <tt>RecordReader</tt> in most cases.</p> |
| |
| <a href="http://issues.apache.org/jira/browse/HADOOP-2085">JIRA</a>]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.mapred.lib"> |
| <!-- start class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce --> |
| <class name="FieldSelectionMapReduce" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.Mapper<K, V, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <implements name="org.apache.hadoop.mapred.Reducer<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <constructor name="FieldSelectionMapReduce" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="map" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K"/> |
| <param name="val" type="V"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
<![CDATA[The identity function. Input key/value pair is written directly to output.]]>
| </doc> |
| </method> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="reduce" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Text"/> |
| <param name="values" type="java.util.Iterator<org.apache.hadoop.io.Text>"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[This class implements a mapper/reducer class that can be used to perform |
| field selections in a manner similar to unix cut. The input data is treated |
| as fields separated by a user specified separator (the default value is |
| "\t"). The user can specify a list of fields that form the map output keys, |
| and a list of fields that form the map output values. If the inputformat is |
| TextInputFormat, the mapper will ignore the key to the map function. and the |
| fields are from the value only. Otherwise, the fields are the union of those |
| from the key and those from the value. |
| |
| The field separator is under attribute "mapred.data.field.separator" |
| |
| The map output field list spec is under attribute "map.output.key.value.fields.spec". |
| The value is expected to be like "keyFieldsSpec:valueFieldsSpec" |
| key/valueFieldsSpec are comma (,) separated field spec: fieldSpec,fieldSpec,fieldSpec ... |
| Each field spec can be a simple number (e.g. 5) specifying a specific field, or a range |
| (like 2-5) to specify a range of fields, or an open range (like 3-) specifying all |
the fields starting from field 3. The open range field spec applies to value fields only.
| They have no effect on the key fields. |
| |
| Here is an example: "4,3,0,1:6,5,1-3,7-". It specifies to use fields 4,3,0 and 1 for keys, |
| and use fields 6,5,1,2,3,7 and above for values. |
| |
| The reduce output field list spec is under attribute "reduce.output.key.value.fields.spec". |
| |
| The reducer extracts output key/value pairs in a similar manner, except that |
| the key is never ignored.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.FieldSelectionMapReduce --> |
| <!-- start class org.apache.hadoop.mapred.lib.HashPartitioner --> |
| <class name="HashPartitioner" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.Partitioner<K2, V2>"/> |
| <constructor name="HashPartitioner" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| </method> |
| <method name="getPartition" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K2"/> |
| <param name="value" type="V2"/> |
| <param name="numReduceTasks" type="int"/> |
| <doc> |
| <![CDATA[Use {@link Object#hashCode()} to partition.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Partition keys by their {@link Object#hashCode()}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.HashPartitioner --> |
| <!-- start class org.apache.hadoop.mapred.lib.IdentityMapper --> |
| <class name="IdentityMapper" extends="org.apache.hadoop.mapred.MapReduceBase" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.Mapper<K, V, K, V>"/> |
| <constructor name="IdentityMapper" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="map" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K"/> |
| <param name="val" type="V"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<K, V>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
<![CDATA[The identity function. Input key/value pair is written directly to
| output.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Implements the identity function, mapping inputs directly to outputs.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.IdentityMapper --> |
| <!-- start class org.apache.hadoop.mapred.lib.IdentityReducer --> |
| <class name="IdentityReducer" extends="org.apache.hadoop.mapred.MapReduceBase" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.Reducer<K, V, K, V>"/> |
| <constructor name="IdentityReducer" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="reduce" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K"/> |
| <param name="values" type="java.util.Iterator<V>"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<K, V>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Writes all keys and values directly to output.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Performs no reduction, writing all input values directly to the output.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.IdentityReducer --> |
| <!-- start class org.apache.hadoop.mapred.lib.InverseMapper --> |
| <class name="InverseMapper" extends="org.apache.hadoop.mapred.MapReduceBase" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.Mapper<K, V, V, K>"/> |
| <constructor name="InverseMapper" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="map" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K"/> |
| <param name="value" type="V"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<V, K>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[The inverse function. Input keys and values are swapped.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A {@link Mapper} that swaps keys and values.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.InverseMapper --> |
| <!-- start class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner --> |
| <class name="KeyFieldBasedPartitioner" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.Partitioner<K2, V2>"/> |
| <constructor name="KeyFieldBasedPartitioner" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| </method> |
| <method name="getPartition" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K2"/> |
| <param name="value" type="V2"/> |
| <param name="numReduceTasks" type="int"/> |
| <doc> |
| <![CDATA[Use {@link Object#hashCode()} to partition.]]> |
| </doc> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.KeyFieldBasedPartitioner --> |
| <!-- start class org.apache.hadoop.mapred.lib.LongSumReducer --> |
| <class name="LongSumReducer" extends="org.apache.hadoop.mapred.MapReduceBase" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.Reducer<K, org.apache.hadoop.io.LongWritable, K, org.apache.hadoop.io.LongWritable>"/> |
| <constructor name="LongSumReducer" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="reduce" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K"/> |
| <param name="values" type="java.util.Iterator<org.apache.hadoop.io.LongWritable>"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<K, org.apache.hadoop.io.LongWritable>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A {@link Reducer} that sums long values.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.LongSumReducer --> |
| <!-- start class org.apache.hadoop.mapred.lib.MultipleOutputFormat --> |
| <class name="MultipleOutputFormat" extends="org.apache.hadoop.mapred.FileOutputFormat<K, V>" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MultipleOutputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter<K, V>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="name" type="java.lang.String"/> |
| <param name="arg3" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a composite record writer that can write key/value data to different |
| output files |
| |
| @param fs |
| the file system to use |
| @param job |
| the job conf for the job |
| @param name |
the leaf file name for the output file (such as "part-00000")
| @param arg3 |
| a progressable for reporting progress. |
| @return a composite record writer |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="generateLeafFileName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Generate the leaf name for the output file name. The default behavior does |
| not change the leaf file name (such as part-00000) |
| |
| @param name |
| the leaf file name for the output file |
| @return the given leaf file name]]> |
| </doc> |
| </method> |
| <method name="generateFileNameForKeyValue" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <param name="value" type="V extends org.apache.hadoop.io.Writable"/> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Generate the file output file name based on the given key and the leaf file |
| name. The default behavior is that the file name does not depend on the |
| key. |
| |
| @param key |
| the key of the output data |
| @param name |
| the leaf file name |
| @return generated file name]]> |
| </doc> |
| </method> |
| <method name="generateActualKey" return="K extends org.apache.hadoop.io.WritableComparable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <param name="value" type="V extends org.apache.hadoop.io.Writable"/> |
| <doc> |
| <![CDATA[Generate the actual key from the given key/value. The default behavior is that |
| the actual key is equal to the given key |
| |
| @param key |
| the key of the output data |
| @param value |
| the value of the output data |
| @return the actual key derived from the given key/value]]> |
| </doc> |
| </method> |
| <method name="generateActualValue" return="V extends org.apache.hadoop.io.Writable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <param name="value" type="V extends org.apache.hadoop.io.Writable"/> |
| <doc> |
| <![CDATA[Generate the actual value from the given key and value. The default behavior is that |
| the actual value is equal to the given value |
| |
| @param key |
| the key of the output data |
| @param value |
| the value of the output data |
| @return the actual value derived from the given key/value]]> |
| </doc> |
| </method> |
| <method name="getInputFileBasedOutputFileName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
<![CDATA[Generate the outfile name based on a given name and the input file name. If
the map input file does not exist (i.e. this is not for a map only job),
| the given name is returned unchanged. If the config value for |
| "num.of.trailing.legs.to.use" is not set, or set 0 or negative, the given |
| name is returned unchanged. Otherwise, return a file name consisting of the |
| N trailing legs of the input file name where N is the config value for |
| "num.of.trailing.legs.to.use". |
| |
| @param job |
| the job config |
| @param name |
| the output file name |
@return the outfile name based on a given name and the input file name.]]>
| </doc> |
| </method> |
| <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter<K, V>" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="name" type="java.lang.String"/> |
| <param name="arg3" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@param fs |
| the file system to use |
| @param job |
| a job conf object |
| @param name |
| the name of the file over which a record writer object will be |
| constructed |
| @param arg3 |
| a progressable object |
| @return A RecordWriter object over the given file |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This abstract class extends the OutputFormatBase, allowing to write the |
| output data to different output files. There are three basic use cases for |
| this class. |
| |
| Case one: This class is used for a map reduce job with at least one reducer. |
| The reducer wants to write data to different files depending on the actual |
keys. It is assumed that a key (or value) encodes the actual key (value)
| and the desired location for the actual key (value). |
| |
Case two: This class is used for a map only job. The job wants to use an
| output file name that is either a part of the input file name of the input |
| data, or some derivation of it. |
| |
| Case three: This class is used for a map only job. The job wants to use an |
output file name that depends on both the keys and the input file name.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.MultipleOutputFormat --> |
| <!-- start class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat --> |
| <class name="MultipleSequenceFileOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat<org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MultipleSequenceFileOutputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter<org.apache.hadoop.io.WritableComparable, org.apache.hadoop.io.Writable>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="name" type="java.lang.String"/> |
| <param name="arg3" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[This class extends the MultipleOutputFormat, allowing to write the output data |
| to different output files in sequence file output format.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat --> |
| <!-- start class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat --> |
| <class name="MultipleTextOutputFormat" extends="org.apache.hadoop.mapred.lib.MultipleOutputFormat<K, V>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MultipleTextOutputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getBaseRecordWriter" return="org.apache.hadoop.mapred.RecordWriter<K, V>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="fs" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="name" type="java.lang.String"/> |
| <param name="arg3" type="org.apache.hadoop.util.Progressable"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
<![CDATA[This class extends the MultipleOutputFormat, allowing the output
data to be written to different output files in Text output format.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.MultipleTextOutputFormat --> |
| <!-- start class org.apache.hadoop.mapred.lib.MultithreadedMapRunner --> |
| <class name="MultithreadedMapRunner" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.MapRunnable<K1, V1, K2, V2>"/> |
| <constructor name="MultithreadedMapRunner" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/> |
| </method> |
| <method name="run" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="input" type="org.apache.hadoop.mapred.RecordReader<K1, V1>"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<K2, V2>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
<![CDATA[Multithreaded implementation for {@link org.apache.hadoop.mapred.MapRunnable}.
<p>
It can be used instead of the default implementation,
{@link org.apache.hadoop.mapred.MapRunner}, when the Map operation is not CPU
| bound in order to improve throughput. |
| <p> |
| Map implementations using this MapRunnable must be thread-safe. |
| <p> |
| The Map-Reduce job has to be configured to use this MapRunnable class (using |
| the JobConf.setMapRunnerClass method) and |
the number of threads the thread-pool can use with the
| <code>mapred.map.multithreadedrunner.threads</code> property, its default |
| value is 10 threads. |
| <p>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.MultithreadedMapRunner --> |
| <!-- start class org.apache.hadoop.mapred.lib.NullOutputFormat --> |
| <class name="NullOutputFormat" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.OutputFormat<K, V>"/> |
| <constructor name="NullOutputFormat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getRecordWriter" return="org.apache.hadoop.mapred.RecordWriter<K, V>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="name" type="java.lang.String"/> |
| <param name="progress" type="org.apache.hadoop.util.Progressable"/> |
| </method> |
| <method name="checkOutputSpecs" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ignored" type="org.apache.hadoop.fs.FileSystem"/> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| </method> |
| <doc> |
| <![CDATA[Consume all outputs and put them in /dev/null.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.NullOutputFormat --> |
| <!-- start class org.apache.hadoop.mapred.lib.RegexMapper --> |
| <class name="RegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.Mapper<K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable>"/> |
| <constructor name="RegexMapper" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| </method> |
| <method name="map" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K"/> |
| <param name="value" type="org.apache.hadoop.io.Text"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.RegexMapper --> |
| <!-- start class org.apache.hadoop.mapred.lib.TokenCountMapper --> |
| <class name="TokenCountMapper" extends="org.apache.hadoop.mapred.MapReduceBase" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.Mapper<K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable>"/> |
| <constructor name="TokenCountMapper" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="map" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K"/> |
| <param name="value" type="org.apache.hadoop.io.Text"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A {@link Mapper} that maps text values into <token,freq> pairs. Uses |
| {@link StringTokenizer} to break text into tokens.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.TokenCountMapper --> |
| <doc> |
| <![CDATA[<p>Library of generally useful mappers, reducers, and partitioners.</p>]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.mapred.lib.aggregate"> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum --> |
| <class name="DoubleValueSum" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/> |
| <constructor name="DoubleValueSum" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The default constructor]]> |
| </doc> |
| </constructor> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[add a value to the aggregator |
| |
| @param val |
| an object whose string representation represents a double value.]]> |
| </doc> |
| </method> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="double"/> |
| <doc> |
| <![CDATA[add a value to the aggregator |
| |
| @param val |
| a double value.]]> |
| </doc> |
| </method> |
| <method name="getReport" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the string representation of the aggregated value]]> |
| </doc> |
| </method> |
| <method name="getSum" return="double" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the aggregated value]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[reset the aggregator]]> |
| </doc> |
| </method> |
| <method name="getCombinerOutput" return="java.util.ArrayList<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[@return an array of one element. The element is a string
representation of the aggregated value. The return value is
expected to be used by a combiner.]]>
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class implements a value aggregator that sums up a sequence of double |
| values.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.DoubleValueSum --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMax --> |
| <class name="LongValueMax" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/> |
| <constructor name="LongValueMax" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[the default constructor]]> |
| </doc> |
| </constructor> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[add a value to the aggregator |
| |
| @param val |
| an object whose string representation represents a long value.]]> |
| </doc> |
| </method> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newVal" type="long"/> |
| <doc> |
| <![CDATA[add a value to the aggregator |
| |
| @param newVal |
| a long value.]]> |
| </doc> |
| </method> |
| <method name="getVal" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the aggregated value]]> |
| </doc> |
| </method> |
| <method name="getReport" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the string representation of the aggregated value]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[reset the aggregator]]> |
| </doc> |
| </method> |
| <method name="getCombinerOutput" return="java.util.ArrayList<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[@return an array of one element. The element is a string
representation of the aggregated value. The return value is
expected to be used by a combiner.]]>
| </doc> |
| </method> |
| <doc> |
<![CDATA[This class implements a value aggregator that maintains the maximum of
| a sequence of long values.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMax --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueMin --> |
| <class name="LongValueMin" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/> |
| <constructor name="LongValueMin" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[the default constructor]]> |
| </doc> |
| </constructor> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[add a value to the aggregator |
| |
| @param val |
| an object whose string representation represents a long value.]]> |
| </doc> |
| </method> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newVal" type="long"/> |
| <doc> |
| <![CDATA[add a value to the aggregator |
| |
| @param newVal |
| a long value.]]> |
| </doc> |
| </method> |
| <method name="getVal" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the aggregated value]]> |
| </doc> |
| </method> |
| <method name="getReport" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the string representation of the aggregated value]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[reset the aggregator]]> |
| </doc> |
| </method> |
| <method name="getCombinerOutput" return="java.util.ArrayList<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[@return an array of one element. The element is a string
representation of the aggregated value. The return value is
expected to be used by a combiner.]]>
| </doc> |
| </method> |
| <doc> |
<![CDATA[This class implements a value aggregator that maintains the minimum of
| a sequence of long values.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueMin --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.LongValueSum --> |
| <class name="LongValueSum" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/> |
| <constructor name="LongValueSum" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[the default constructor]]> |
| </doc> |
| </constructor> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[add a value to the aggregator |
| |
| @param val |
| an object whose string representation represents a long value.]]> |
| </doc> |
| </method> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="long"/> |
| <doc> |
| <![CDATA[add a value to the aggregator |
| |
| @param val |
| a long value.]]> |
| </doc> |
| </method> |
| <method name="getSum" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the aggregated value]]> |
| </doc> |
| </method> |
| <method name="getReport" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the string representation of the aggregated value]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[reset the aggregator]]> |
| </doc> |
| </method> |
| <method name="getCombinerOutput" return="java.util.ArrayList<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[@return an array of one element. The element is a string
representation of the aggregated value. The return value is
expected to be used by a combiner.]]>
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class implements a value aggregator that sums up |
| a sequence of long values.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.LongValueSum --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMax --> |
| <class name="StringValueMax" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/> |
| <constructor name="StringValueMax" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[the default constructor]]> |
| </doc> |
| </constructor> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[add a value to the aggregator |
| |
| @param val |
| a string.]]> |
| </doc> |
| </method> |
| <method name="getVal" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the aggregated value]]> |
| </doc> |
| </method> |
| <method name="getReport" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the string representation of the aggregated value]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[reset the aggregator]]> |
| </doc> |
| </method> |
| <method name="getCombinerOutput" return="java.util.ArrayList<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[@return an array of one element. The element is a string
representation of the aggregated value. The return value is
expected to be used by a combiner.]]>
| </doc> |
| </method> |
| <doc> |
<![CDATA[This class implements a value aggregator that maintains the biggest of
| a sequence of strings.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMax --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.StringValueMin --> |
| <class name="StringValueMin" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/> |
| <constructor name="StringValueMin" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[the default constructor]]> |
| </doc> |
| </constructor> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[add a value to the aggregator |
| |
| @param val |
| a string.]]> |
| </doc> |
| </method> |
| <method name="getVal" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the aggregated value]]> |
| </doc> |
| </method> |
| <method name="getReport" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the string representation of the aggregated value]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[reset the aggregator]]> |
| </doc> |
| </method> |
| <method name="getCombinerOutput" return="java.util.ArrayList<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[@return an array of one element. The element is a string
representation of the aggregated value. The return value is
expected to be used by a combiner.]]>
| </doc> |
| </method> |
| <doc> |
<![CDATA[This class implements a value aggregator that maintains the smallest of
| a sequence of strings.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.StringValueMin --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount --> |
| <class name="UniqValueCount" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/> |
| <constructor name="UniqValueCount" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[the default constructor]]> |
| </doc> |
| </constructor> |
| <constructor name="UniqValueCount" type="long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[constructor |
| @param maxNum the limit in the number of unique values to keep.]]> |
| </doc> |
| </constructor> |
| <method name="setMaxItems" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="n" type="long"/> |
| <doc> |
| <![CDATA[Set the limit on the number of unique values |
| @param n the desired limit on the number of unique values |
| @return the new limit on the number of unique values]]> |
| </doc> |
| </method> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[add a value to the aggregator |
| |
| @param val |
| an object.]]> |
| </doc> |
| </method> |
| <method name="getReport" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[@return the number of unique objects aggregated]]>
| </doc> |
| </method> |
| <method name="getUniqueItems" return="java.util.Set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the set of the unique objects]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[reset the aggregator]]> |
| </doc> |
| </method> |
| <method name="getCombinerOutput" return="java.util.ArrayList" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[@return an array of the unique objects. The return value is
expected to be used by a combiner.]]>
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class implements a value aggregator that dedupes a sequence of objects.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.UniqValueCount --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor --> |
| <class name="UserDefinedValueAggregatorDescriptor" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/> |
| <constructor name="UserDefinedValueAggregatorDescriptor" type="java.lang.String, org.apache.hadoop.mapred.JobConf" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@param className the class name of the user defined descriptor class |
@param job a configuration object used for descriptor configuration]]>
| </doc> |
| </constructor> |
| <method name="createInstance" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="className" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Create an instance of the given class |
| @param className the name of the class |
| @return a dynamically created instance of the given class]]> |
| </doc> |
| </method> |
| <method name="generateKeyValPairs" return="java.util.ArrayList<java.util.Map.Entry<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Object"/> |
| <param name="val" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pairs |
| by delegating the invocation to the real object. |
| |
| @param key |
| input key |
| @param val |
| input value |
| @return a list of aggregation id/value pairs. An aggregation id encodes an |
| aggregation type which is used to guide the way to aggregate the |
value in the reduce/combiner phase of an Aggregate based job.]]>
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the string representation of this object.]]> |
| </doc> |
| </method> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Do nothing.]]> |
| </doc> |
| </method> |
| <doc> |
<![CDATA[This class implements a wrapper for a user-defined value aggregator descriptor.
It serves two functions: one is to create an object of ValueAggregatorDescriptor from the
name of a user-defined class that may be dynamically loaded. The other is to
delegate invocations of the generateKeyValPairs function to the created object.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.UserDefinedValueAggregatorDescriptor --> |
| <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator --> |
| <interface name="ValueAggregator" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[add a value to the aggregator |
| |
| @param val the value to be added]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[reset the aggregator]]> |
| </doc> |
| </method> |
| <method name="getReport" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
<![CDATA[@return the string representation of the aggregator]]>
| </doc> |
| </method> |
| <method name="getCombinerOutput" return="java.util.ArrayList" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return an array of values as the outputs of the combiner.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This interface defines the minimal protocol for value aggregators.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregator --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor --> |
| <class name="ValueAggregatorBaseDescriptor" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor"/> |
| <constructor name="ValueAggregatorBaseDescriptor" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="generateEntry" return="java.util.Map.Entry<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="type" type="java.lang.String"/> |
| <param name="id" type="java.lang.String"/> |
| <param name="val" type="org.apache.hadoop.io.Text"/> |
| <doc> |
| <![CDATA[@param type the aggregation type |
| @param id the aggregation id |
| @param val the val associated with the id to be aggregated |
| @return an Entry whose key is the aggregation id prefixed with |
| the aggregation type.]]> |
| </doc> |
| </method> |
| <method name="generateValueAggregator" return="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="type" type="java.lang.String"/> |
| <doc> |
| <![CDATA[@param type the aggregation type |
| @return a value aggregator of the given type.]]> |
| </doc> |
| </method> |
| <method name="generateKeyValPairs" return="java.util.ArrayList<java.util.Map.Entry<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Object"/> |
| <param name="val" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Generate 1 or 2 aggregation-id/value pairs for the given key/value pair. |
| The first id will be of type LONG_VALUE_SUM, with "record_count" as |
| its aggregation id. If the input is a file split, |
| the second id of the same type will be generated too, with the file name |
| as its aggregation id. This achieves the behavior of counting the total number |
| of records in the input data, and the number of records in each input file. |
| |
| @param key |
| input key |
| @param val |
| input value |
| @return a list of aggregation id/value pairs. An aggregation id encodes an |
| aggregation type which is used to guide the way to aggregate the |
| value in the reduce/combiner phase of an Aggregate based job.]]> |
| </doc> |
| </method> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[get the input file name. |
| |
| @param job a job configuration object]]> |
| </doc> |
| </method> |
| <field name="UNIQ_VALUE_COUNT" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LONG_VALUE_SUM" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DOUBLE_VALUE_SUM" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="VALUE_HISTOGRAM" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LONG_VALUE_MAX" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LONG_VALUE_MIN" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="STRING_VALUE_MAX" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="STRING_VALUE_MIN" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="inputFile" type="java.lang.String" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[This class implements the common functionalities of |
| the subclasses of ValueAggregatorDescriptor class.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner --> |
| <class name="ValueAggregatorCombiner" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase<K1, V1>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ValueAggregatorCombiner" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Combiner does not need to configure.]]> |
| </doc> |
| </method> |
| <method name="reduce" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Text"/> |
| <param name="values" type="java.util.Iterator<org.apache.hadoop.io.Text>"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Combines values for a given key. |
| @param key the key is expected to be a Text object, whose prefix indicates |
| the type of aggregation to aggregate the values. |
| @param values the values to combine |
| @param output to collect combined values]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Do nothing.]]> |
| </doc> |
| </method> |
| <method name="map" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/> |
| <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/> |
| <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Do nothing. Should not be called.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class implements the generic combiner of Aggregate.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorCombiner --> |
| <!-- start interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor --> |
| <interface name="ValueAggregatorDescriptor" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="generateKeyValPairs" return="java.util.ArrayList<java.util.Map.Entry<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="java.lang.Object"/> |
| <param name="val" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Generate a list of aggregation-id/value pairs for the given key/value pair. |
| This function is usually called by the mapper of an Aggregate based job. |
| |
| @param key |
| input key |
| @param val |
| input value |
| @return a list of aggregation id/value pairs. An aggregation id encodes an |
| aggregation type which is used to guide the way to aggregate the |
| value in the reduce/combiner phase of an Aggregate based job.]]> |
| </doc> |
| </method> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Configure the object |
| |
| @param job |
| a JobConf object that may contain the information that can be used |
| to configure the object.]]> |
| </doc> |
| </method> |
| <field name="TYPE_SEPARATOR" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="ONE" type="org.apache.hadoop.io.Text" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[This interface defines the contract a value aggregator descriptor must |
| support. Such a descriptor can be configured with a JobConf object. Its main |
| function is to generate a list of aggregation-id/value pairs. An aggregation |
| id encodes an aggregation type which is used to guide the way to aggregate |
| the value in the reduce/combiner phase of an Aggregate based job. The mapper in |
| an Aggregate based map/reduce job may create one or more of |
| ValueAggregatorDescriptor objects at configuration time. For each input |
| key/value pair, the mapper will use those objects to create aggregation |
| id/value pairs.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob --> |
| <class name="ValueAggregatorJob" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ValueAggregatorJob" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <param name="descriptors" type="java.lang.Class[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createValueAggregatorJobs" return="org.apache.hadoop.mapred.jobcontrol.JobControl" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create an Aggregate based map/reduce job. |
| |
| @param args the arguments used for job creation. Generic hadoop |
| arguments are accepted. |
| @return a JobConf object ready for submission. |
| |
| @throws IOException |
| @see GenericOptionsParser]]> |
| </doc> |
| </method> |
| <method name="createValueAggregatorJob" return="org.apache.hadoop.mapred.JobConf" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <param name="descriptors" type="java.lang.Class[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="setAggregatorDescriptors" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="descriptors" type="java.lang.Class[]"/> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[create and run an Aggregate based map/reduce job. |
| |
| @param args the arguments used for job creation |
| @throws IOException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This is the main class for creating a map/reduce job using Aggregate |
| framework. The Aggregate is a specialization of map/reduce framework, |
| specializing for performing various simple aggregations. |
| |
| Generally speaking, in order to implement an application using Map/Reduce |
| model, the developer is to implement Map and Reduce functions (and possibly |
| combine function). However, a lot of applications related to counting and |
| statistics computing have very similar characteristics. Aggregate abstracts |
| out the general patterns of these functions and implements those patterns. |
| In particular, the package provides generic mapper/reducer/combiner classes, |
| and a set of built-in value aggregators, and a generic utility class that |
| helps user create map/reduce jobs using the generic class. The built-in |
| aggregators include: |
| |
| sum over numeric values count the number of distinct values compute the |
| histogram of values compute the minimum, maximum, median, average, standard |
| deviation of numeric values |
| |
| The developer using Aggregate will need only to provide a plugin class |
| conforming to the following interface: |
| |
| public interface ValueAggregatorDescriptor { public ArrayList<Entry> |
| generateKeyValPairs(Object key, Object value); public void |
| configure(JobConf job); } |
| |
| The package also provides a base class, ValueAggregatorBaseDescriptor, |
| implementing the above interface. The user can extend the base class and |
| implement generateKeyValPairs accordingly. |
| |
| The primary work of generateKeyValPairs is to emit one or more key/value |
| pairs based on the input key/value pair. The key in an output key/value pair |
| encode two pieces of information: aggregation type and aggregation id. The |
| value will be aggregated onto the aggregation id according the aggregation |
| type. |
| |
| This class offers a function to generate a map/reduce job using Aggregate |
| framework. The function takes the following parameters: input directory spec |
| input format (text or sequence file) output directory a file specifying the |
| user plugin class]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase --> |
| <class name="ValueAggregatorJobBase" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.Mapper<K1, V1, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <implements name="org.apache.hadoop.mapred.Reducer<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <constructor name="ValueAggregatorJobBase" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| </method> |
| <method name="logSpec" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <field name="aggregatorDescriptorList" type="java.util.ArrayList<org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorDescriptor>" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[This abstract class implements some common functionalities of |
| the generic mapper, reducer and combiner classes of Aggregate.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper --> |
| <class name="ValueAggregatorMapper" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase<K1, V1>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ValueAggregatorMapper" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="map" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K1 extends org.apache.hadoop.io.WritableComparable"/> |
| <param name="value" type="V1 extends org.apache.hadoop.io.Writable"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[the map function. It iterates through the value aggregator descriptor |
| list to generate aggregation id/value pairs and emit them.]]> |
| </doc> |
| </method> |
| <method name="reduce" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="arg0" type="org.apache.hadoop.io.Text"/> |
| <param name="arg1" type="java.util.Iterator<org.apache.hadoop.io.Text>"/> |
| <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Do nothing. Should not be called.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class implements the generic mapper of Aggregate.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorMapper --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer --> |
| <class name="ValueAggregatorReducer" extends="org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJobBase<K1, V1>" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ValueAggregatorReducer" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="reduce" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="org.apache.hadoop.io.Text"/> |
| <param name="values" type="java.util.Iterator<org.apache.hadoop.io.Text>"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[@param key |
| the key is expected to be a Text object, whose prefix indicates |
| the type of aggregation to aggregate the values. In effect, data |
| driven computing is achieved. It is assumed that each aggregator's |
| getReport method emits appropriate output for the aggregator. This |
| may be further customized. |
| @param values the values to be aggregated]]> |
| </doc> |
| </method> |
| <method name="map" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="arg0" type="K1 extends org.apache.hadoop.io.WritableComparable"/> |
| <param name="arg1" type="V1 extends org.apache.hadoop.io.Writable"/> |
| <param name="arg2" type="org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Text, org.apache.hadoop.io.Text>"/> |
| <param name="arg3" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Do nothing. Should not be called]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class implements the generic reducer of Aggregate.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorReducer --> |
| <!-- start class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram --> |
| <class name="ValueHistogram" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.lib.aggregate.ValueAggregator"/> |
| <constructor name="ValueHistogram" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="addNextValue" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[add the given val to the aggregator. |
| |
| @param val the value to be added. It is expected to be a string |
| in the form of xxxx\tnum, meaning xxxx has num occurrences.]]> |
| </doc> |
| </method> |
| <method name="getReport" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the string representation of this aggregator. |
| It includes the following basic statistics of the histogram: |
| the number of unique values |
| the minimum value |
| the median value |
| the maximum value |
| the average value |
| the standard deviation]]> |
| </doc> |
| </method> |
| <method name="getReportDetails" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return a string representation of the list of value/frequency pairs of |
| the histogram]]> |
| </doc> |
| </method> |
| <method name="getCombinerOutput" return="java.util.ArrayList" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return a list of value/frequency pairs. |
| The return value is expected to be used by the reducer.]]> |
| </doc> |
| </method> |
| <method name="getReportItems" return="java.util.TreeMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return a TreeMap representation of the histogram]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[reset the aggregator]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[This class implements a value aggregator that computes the |
| histogram of a sequence of strings.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.lib.aggregate.ValueHistogram --> |
| <doc> |
| <![CDATA[Classes for performing various counting and aggregations. |
| <p /> |
| <h2><a name="Aggregate"></a>Aggregate framework </h2> |
| <p /> |
| Generally speaking, in order to implement an application using Map/Reduce |
| model, the developer needs to implement Map and Reduce functions (and possibly |
| Combine function). However, for a lot of applications related to counting and |
| statistics computing, these functions have very similar |
| characteristics. This provides a package implementing |
| those patterns. In particular, the package provides a generic mapper class, |
| a reducer class and a combiner class, and a set of built-in value aggregators. |
| It also provides a generic utility class, ValueAggregatorJob, that offers a static function that |
| creates map/reduce jobs: |
| <blockquote> |
| <pre> |
| public static JobConf createValueAggregatorJob(String args[]) throws IOException; |
| </pre> |
| </blockquote> |
| To call this function, the user needs to pass in arguments specifying the input directories, the output directory, |
| the number of reducers, the input data format (textinputformat or sequencefileinputformat), and a file specifying user plugin class(es) to load by the mapper. |
| A user plugin class is responsible for specifying what |
| aggregators to use and what values are for which aggregators. |
| A plugin class must implement the following interface: |
| <blockquote> |
| <pre> |
| public interface ValueAggregatorDescriptor { |
| public ArrayList<Entry> generateKeyValPairs(Object key, Object value); |
| public void configure(JobConf job); |
| } |
| </pre> |
| </blockquote> |
| Function generateKeyValPairs will generate aggregation key/value pairs for the |
| input key/value pair. Each aggregation key encodes two pieces of information: the aggregation type and aggregation ID. |
| The value is the value to be aggregated onto the aggregation ID according to the aggregation type. Here |
| is a simple example user plugin class for counting the words in the input texts: |
| <blockquote> |
| <pre> |
| public class WordCountAggregatorDescriptor extends ValueAggregatorBaseDescriptor { |
| public ArrayList<Entry> generateKeyValPairs(Object key, Object val) { |
| String words [] = val.toString().split(" |\t"); |
| ArrayList<Entry> retv = new ArrayList<Entry>(); |
| for (int i = 0; i < words.length; i++) { |
| retv.add(generateEntry(LONG_VALUE_SUM, words[i], ONE)) |
| } |
| return retv; |
| } |
| public void configure(JobConf job) {} |
| } |
| </pre> |
| </blockquote> |
| In the above code, LONG_VALUE_SUM is a string denoting the aggregation type LongValueSum, which sums over long values. |
| ONE denotes a string "1". Function generateEntry(LONG_VALUE_SUM, words[i], ONE) will interpret the first argument as an aggregation type, the second as an aggregation ID, and the third argument as the value to be aggregated. The output will look like: "LongValueSum:xxxx", where XXXX is the string value of words[i]. The value will be "1". The mapper will call generateKeyValPairs(Object key, Object val) for each input key/value pair to generate the desired aggregation id/value pairs. |
| The downstream combiner/reducer will interpret these pairs as adding one to the aggregator XXXX. |
| <p /> |
| Class ValueAggregatorBaseDescriptor is a base class that user plugin classes can extend. Here is the XML fragment specifying the user plugin class: |
| <blockquote> |
| <pre> |
| <property> |
| <name>aggregator.descriptor.num</name> |
| <value>1</value> |
| </property> |
| <property> |
| <name>aggregator.descriptor.0</name> |
| <value>UserDefined,org.apache.hadoop.mapred.lib.aggregate.examples.WordCountAggregatorDescriptor</value> |
| </property> |
| </pre> |
| </blockquote> |
| Class ValueAggregatorBaseDescriptor itself provides a default implementation for generateKeyValPairs: |
| <blockquote> |
| <pre> |
| public ArrayList<Entry> generateKeyValPairs(Object key, Object val) { |
| ArrayList<Entry> retv = new ArrayList<Entry>(); |
| String countType = LONG_VALUE_SUM; |
| String id = "record_count"; |
| retv.add(generateEntry(countType, id, ONE)); |
| return retv; |
| } |
| </pre> |
| </blockquote> |
| Thus, if no user plugin class is specified, the default behavior of the map/reduce job is to count the number of records (lines) in the input files. |
| <p /> |
| During runtime, the mapper will invoke the generateKeyValPairs function for each input key/value pair, and emit the generated |
| key/value pairs: |
| <blockquote> |
| <pre> |
| public void map(WritableComparable key, Writable value, |
| OutputCollector output, Reporter reporter) throws IOException { |
| Iterator iter = this.aggregatorDescriptorList.iterator(); |
| while (iter.hasNext()) { |
| ValueAggregatorDescriptor ad = (ValueAggregatorDescriptor) iter.next(); |
| Iterator<Entry> ens = ad.generateKeyValPairs(key, value).iterator(); |
| while (ens.hasNext()) { |
| Entry en = ens.next(); |
| output.collect((WritableComparable)en.getKey(), (Writable)en.getValue()); |
| } |
| } |
| } |
| </pre> |
| </blockquote> |
| The reducer will create an aggregator object for each key/value list pair, and perform the appropriate aggregation. |
| At the end, it will emit the aggregator's results: |
| <blockquote> |
| <pre> |
| public void reduce(WritableComparable key, Iterator values, |
| OutputCollector output, Reporter reporter) throws IOException { |
| String keyStr = key.toString(); |
| int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR); |
| String type = keyStr.substring(0,pos); |
| keyStr = keyStr.substring(pos+ValueAggregatorDescriptor.TYPE_SEPARATOR.length()); |
| ValueAggregator aggregator = |
| ValueAggregatorBaseDescriptor.generateValueAggregator(type); |
| while (values.hasNext()) { |
| aggregator.addNextValue(values.next()); |
| } |
| String val = aggregator.getReport(); |
| key = new Text(keyStr); |
| output.collect(key, new Text(val)); |
| } |
| </pre> |
| </blockquote> |
| In order to be able to use a combiner, all the aggregators must be associative and commutative. |
| The following are the types supported: <ul> |
| <li> LongValueSum: sum over long values |
| </li> <li> DoubleValueSum: sum over float/double values |
| </li> <li> uniqValueCount: count the number of distinct values |
| </li> <li> ValueHistogram: compute the histogram of values compute the minimum, maximum, median, average, standard deviation of numeric values |
| </li></ul> |
| <p /> |
| <h2><a name="Create_and_run"></a> Create and run an application </h2> |
| <p /> |
| To create an application, the user needs to do the following things: |
| <p /> |
| 1. Implement a user plugin: |
| <blockquote> |
| <pre> |
| import org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorBaseDescriptor; |
| import org.apache.hadoop.mapred.JobConf; |
| |
| public class WordCountAggregatorDescriptor extends ValueAggregatorBaseDescriptor { |
| public void map(WritableComparable key, Writable value, |
| OutputCollector output, Reporter reporter) throws IOException { |
| } |
| public void configure(JobConf job) { |
| |
| } |
| } |
| </pre> |
| </blockquote> |
| |
| 2. Create an xml file specifying the user plugin. |
| <p /> |
| 3. Compile your java class and create a jar file, say wc.jar. |
| |
| <p /> |
| Finally, run the job: |
| <blockquote> |
| <pre> |
| hadoop jar wc.jar org.apache.hadoop.mapred.lib.aggregate.ValueAggregatorJob indirs outdir numofreducers textinputformat|sequencefileinputformat spec_file |
| </pre> |
| </blockquote> |
| <p />]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.mapred.pipes"> |
| <!-- start class org.apache.hadoop.mapred.pipes.Submitter --> |
| <class name="Submitter" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Submitter" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getExecutable" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Get the URI of the application's executable. |
| @param conf |
| @return the URI where the application's executable is located]]> |
| </doc> |
| </method> |
| <method name="setExecutable" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="executable" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the URI for the application's executable. Normally this is an hdfs: |
| location. |
| @param conf |
| @param executable The URI of the application's executable.]]> |
| </doc> |
| </method> |
| <method name="setIsJavaRecordReader" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="value" type="boolean"/> |
| <doc> |
| <![CDATA[Set whether the job is using a Java RecordReader. |
| @param conf the configuration to modify |
| @param value the new value]]> |
| </doc> |
| </method> |
| <method name="getIsJavaRecordReader" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Check whether the job is using a Java RecordReader |
| @param conf the configuration to check |
| @return is it a Java RecordReader?]]> |
| </doc> |
| </method> |
| <method name="setIsJavaMapper" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="value" type="boolean"/> |
| <doc> |
| <![CDATA[Set whether the Mapper is written in Java. |
| @param conf the configuration to modify |
| @param value the new value]]> |
| </doc> |
| </method> |
| <method name="getIsJavaMapper" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Check whether the job is using a Java Mapper. |
| @param conf the configuration to check |
| @return is it a Java Mapper?]]> |
| </doc> |
| </method> |
| <method name="setIsJavaReducer" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="value" type="boolean"/> |
| <doc> |
| <![CDATA[Set whether the Reducer is written in Java. |
| @param conf the configuration to modify |
| @param value the new value]]> |
| </doc> |
| </method> |
| <method name="getIsJavaReducer" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Check whether the job is using a Java Reducer. |
| @param conf the configuration to check |
| @return is it a Java Reducer?]]> |
| </doc> |
| </method> |
| <method name="setIsJavaRecordWriter" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="value" type="boolean"/> |
| <doc> |
| <![CDATA[Set whether the job will use a Java RecordWriter. |
| @param conf the configuration to modify |
| @param value the new value to set]]> |
| </doc> |
| </method> |
| <method name="getIsJavaRecordWriter" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Will the reduce use a Java RecordWriter? |
| @param conf the configuration to check |
| @return true, if the output of the job will be written by Java]]> |
| </doc> |
| </method> |
| <method name="getKeepCommandFile" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Does the user want to keep the command file for debugging? If this is |
| true, pipes will write a copy of the command data to a file in the |
| task directory named "downlink.data", which may be used to run the C++ |
| program under the debugger. You probably also want to set |
| JobConf.setKeepFailedTaskFiles(true) to keep the entire directory from |
| being deleted. |
| To run using the data file, set the environment variable |
| "hadoop.pipes.command.file" to point to the file. |
| @param conf the configuration to check |
| @return will the framework save the command file?]]> |
| </doc> |
| </method> |
| <method name="setKeepCommandFile" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="keep" type="boolean"/> |
| <doc> |
| <![CDATA[Set whether to keep the command file for debugging |
| @param conf the configuration to modify |
| @param keep the new value]]> |
| </doc> |
| </method> |
| <method name="submitJob" return="org.apache.hadoop.mapred.RunningJob" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.mapred.JobConf"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Submit a job to the map/reduce cluster. All of the necessary modifications |
| to the job to run under pipes are made to the configuration. |
| @param conf the job to submit to the cluster (MODIFIED) |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[Submit a pipes job based on the command line arguments. |
| @param args]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[The main entry point and job submitter. It may either be used as a command |
| line-based or API-based method to launch Pipes jobs.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.mapred.pipes.Submitter --> |
| <doc> |
| <![CDATA[Hadoop Pipes allows C++ code to use Hadoop DFS and map/reduce. The |
| primary approach is to split the C++ code into a separate process that |
| does the application specific code. In many ways, the approach will be |
| similar to Hadoop streaming, but using Writable serialization to |
| convert the types into bytes that are sent to the process via a |
| socket. |
| |
| <p> |
| |
| The class org.apache.hadoop.mapred.pipes.Submitter has a public static |
| method to submit a job as a JobConf and a main method that takes an |
| application and optional configuration file, input directories, and |
| output directory. The cli for the main looks like: |
| |
| <pre> |
| bin/hadoop pipes \ |
| [-conf <i>path</i>] \ |
| [-input <i>inputDir</i>] \ |
| [-output <i>outputDir</i>] \ |
| [-jar <i>applicationJarFile</i>] \ |
| [-inputformat <i>class</i>] \ |
| [-map <i>class</i>] \ |
| [-partitioner <i>class</i>] \ |
| [-reduce <i>class</i>] \ |
| [-writer <i>class</i>] \ |
| [-program <i>program url</i>] |
| </pre> |
| |
| <p> |
| |
| The application programs link against a thin C++ wrapper library that |
| handles the communication with the rest of the Hadoop system. The C++ |
| interface is "swigable" so that interfaces can be generated for python |
| and other scripting languages. All of the C++ functions and classes |
| are in the HadoopPipes namespace. The job may consist of any |
| combination of Java and C++ RecordReaders, Mappers, Partitioner, |
| Combiner, Reducer, and RecordWriter. |
| |
| <p> |
| |
| Hadoop Pipes has a generic Java class for handling the mapper and |
| reducer (PipesMapRunner and PipesReducer). They fork off the |
| application program and communicate with it over a socket. The |
| communication is handled by the C++ wrapper library and the |
| PipesMapRunner and PipesReducer. |
| |
| <p> |
| |
| The application program passes in a factory object that can create |
| the various objects needed by the framework to the runTask |
| function. The framework creates the Mapper or Reducer as |
| appropriate and calls the map or reduce method to invoke the |
| application's code. The JobConf is available to the application. |
| |
| <p> |
| |
| The Mapper and Reducer objects get all of their inputs, outputs, and |
| context via context objects. The advantage of using the context |
| objects is that their interface can be extended with additional |
| methods without breaking clients. Although this interface is different |
| from the current Java interface, the plan is to migrate the Java |
| interface in this direction. |
| |
| <p> |
| |
| Although the Java implementation is typed, the C++ interfaces of keys |
| and values is just a byte buffer. Since STL strings provide precisely |
| the right functionality and are standard, they will be used. The |
| decision to not use stronger types was to simplify the interface. |
| |
| <p> |
| |
| The application can also define combiner functions. The combiner will |
| be run locally by the framework in the application process to avoid |
| the round trip to the Java process and back. Because the compare |
| function is not available in C++, the combiner will use memcmp to |
| sort the inputs to the combiner. This is not as general as the Java |
| equivalent, which uses the user's comparator, but should cover the |
| majority of the use cases. As the map function outputs key/value |
| pairs, they will be buffered. When the buffer is full, it will be |
| sorted and passed to the combiner. The output of the combiner will be |
| sent to the Java process. |
| |
| <p> |
| |
| The application can also set a partition function to control which key |
| is given to a particular reduce. If a partition function is not |
| defined, the Java one will be used. The partition function will be |
| called by the C++ framework before the key/value pair is sent back to |
| Java.]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.metrics"> |
| <!-- start class org.apache.hadoop.metrics.ContextFactory --> |
| <class name="ContextFactory" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ContextFactory" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of ContextFactory]]> |
| </doc> |
| </constructor> |
| <method name="getAttribute" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="attributeName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Returns the value of the named attribute, or null if there is no |
| attribute of that name. |
| |
| @param attributeName the attribute name |
| @return the attribute value]]> |
| </doc> |
| </method> |
| <method name="getAttributeNames" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the names of all the factory's attributes. |
| |
| @return the attribute names]]> |
| </doc> |
| </method> |
| <method name="setAttribute" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="attributeName" type="java.lang.String"/> |
| <param name="value" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Sets the named factory attribute to the specified value, creating it |
| if it did not already exist. If the value is null, this is the same as |
| calling removeAttribute. |
| |
| @param attributeName the attribute name |
| @param value the new attribute value]]> |
| </doc> |
| </method> |
| <method name="removeAttribute" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="attributeName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Removes the named attribute if it exists. |
| |
| @param attributeName the attribute name]]> |
| </doc> |
| </method> |
| <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="contextName" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <exception name="ClassNotFoundException" type="java.lang.ClassNotFoundException"/> |
| <exception name="InstantiationException" type="java.lang.InstantiationException"/> |
| <exception name="IllegalAccessException" type="java.lang.IllegalAccessException"/> |
| <doc> |
| <![CDATA[Returns the named MetricsContext instance, constructing it if necessary |
| using the factory's current configuration attributes. <p/> |
| |
| When constructing the instance, if the factory property |
| <code><i>contextName</i>.class</code> exists, |
| its value is taken to be the name of the class to instantiate. Otherwise, |
| the default is to create an instance of |
| <code>org.apache.hadoop.metrics.spi.NullContext</code>, which is a |
| dummy "no-op" context which will cause all metric data to be discarded. |
| |
| @param contextName the name of the context |
| @return the named MetricsContext]]> |
| </doc> |
| </method> |
| <method name="getNullContext" return="org.apache.hadoop.metrics.MetricsContext" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="contextName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Returns a "null" context - one which does nothing.]]> |
| </doc> |
| </method> |
| <method name="getFactory" return="org.apache.hadoop.metrics.ContextFactory" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns the singleton ContextFactory instance, constructing it if |
| necessary. <p/> |
| |
| When the instance is constructed, this method checks if the file |
| <code>hadoop-metrics.properties</code> exists on the class path. If it |
| exists, it must be in the format defined by java.util.Properties, and all |
| the properties in the file are set as attributes on the newly created |
| ContextFactory instance. |
| |
| @return the singleton ContextFactory instance]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Factory class for creating MetricsContext objects. To obtain an instance |
| of this class, use the static <code>getFactory()</code> method.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.ContextFactory --> |
| <!-- start interface org.apache.hadoop.metrics.MetricsContext --> |
| <interface name="MetricsContext" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getContextName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the context name. |
| |
| @return the context name]]> |
| </doc> |
| </method> |
| <method name="startMonitoring" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Starts or restarts monitoring, the emitting of metrics records as they are |
| updated.]]> |
| </doc> |
| </method> |
| <method name="stopMonitoring" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Stops monitoring. This does not free any data that the implementation |
| may have buffered for sending at the next timer event. It |
| is OK to call <code>startMonitoring()</code> again after calling |
| this. |
| @see #close()]]> |
| </doc> |
| </method> |
| <method name="isMonitoring" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns true if monitoring is currently in progress.]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Stops monitoring and also frees any buffered data, returning this |
| object to its initial state.]]> |
| </doc> |
| </method> |
| <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="recordName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Creates a new MetricsRecord instance with the given <code>recordName</code>. |
| Throws an exception if the metrics implementation is configured with a fixed |
| set of record names and <code>recordName</code> is not in that set. |
| |
| @param recordName the name of the record |
| @throws MetricsException if recordName conflicts with configuration data]]> |
| </doc> |
| </method> |
| <method name="registerUpdater" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="updater" type="org.apache.hadoop.metrics.Updater"/> |
| <doc> |
| <![CDATA[Registers a callback to be called at regular time intervals, as |
| determined by the implementation-class specific configuration. |
| |
| @param updater object to be run periodically; it should update |
| some metrics records and then return]]> |
| </doc> |
| </method> |
| <method name="unregisterUpdater" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="updater" type="org.apache.hadoop.metrics.Updater"/> |
| <doc> |
| <![CDATA[Removes a callback, if it exists. |
| |
| @param updater object to be removed from the callback list]]> |
| </doc> |
| </method> |
| <field name="DEFAULT_PERIOD" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Default period in seconds at which data is sent to the metrics system.]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[The main interface to the metrics package.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.metrics.MetricsContext --> |
| <!-- start class org.apache.hadoop.metrics.MetricsException --> |
| <class name="MetricsException" extends="java.lang.RuntimeException" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MetricsException" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of MetricsException]]> |
| </doc> |
| </constructor> |
| <constructor name="MetricsException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of MetricsException |
| |
| @param message an error message]]> |
| </doc> |
| </constructor> |
| <doc> |
| <![CDATA[General-purpose, unchecked metrics exception.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.MetricsException --> |
| <!-- start interface org.apache.hadoop.metrics.MetricsRecord --> |
| <interface name="MetricsRecord" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getRecordName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the record name. |
| |
| @return the record name]]> |
| </doc> |
| </method> |
| <method name="setTag" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tagName" type="java.lang.String"/> |
| <param name="tagValue" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Sets the named tag to the specified value. The tagValue may be null, |
| which is treated the same as an empty String. |
| |
| @param tagName name of the tag |
| @param tagValue new value of the tag |
| @throws MetricsException if the tagName conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="setTag" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tagName" type="java.lang.String"/> |
| <param name="tagValue" type="int"/> |
| <doc> |
| <![CDATA[Sets the named tag to the specified value. |
| |
| @param tagName name of the tag |
| @param tagValue new value of the tag |
| @throws MetricsException if the tagName conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="setTag" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tagName" type="java.lang.String"/> |
| <param name="tagValue" type="short"/> |
| <doc> |
| <![CDATA[Sets the named tag to the specified value. |
| |
| @param tagName name of the tag |
| @param tagValue new value of the tag |
| @throws MetricsException if the tagName conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="setTag" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tagName" type="java.lang.String"/> |
| <param name="tagValue" type="byte"/> |
| <doc> |
| <![CDATA[Sets the named tag to the specified value. |
| |
| @param tagName name of the tag |
| @param tagValue new value of the tag |
| @throws MetricsException if the tagName conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="removeTag" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tagName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Removes any tag of the specified name. |
| |
| @param tagName name of a tag]]> |
| </doc> |
| </method> |
| <method name="setMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="int"/> |
| <doc> |
| <![CDATA[Sets the named metric to the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue new value of the metric |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="setMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="short"/> |
| <doc> |
| <![CDATA[Sets the named metric to the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue new value of the metric |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="setMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="byte"/> |
| <doc> |
| <![CDATA[Sets the named metric to the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue new value of the metric |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="setMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="float"/> |
| <doc> |
| <![CDATA[Sets the named metric to the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue new value of the metric |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="incrMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="int"/> |
| <doc> |
| <![CDATA[Increments the named metric by the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue incremental value |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="incrMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="short"/> |
| <doc> |
| <![CDATA[Increments the named metric by the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue incremental value |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="incrMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="byte"/> |
| <doc> |
| <![CDATA[Increments the named metric by the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue incremental value |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="incrMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="float"/> |
| <doc> |
| <![CDATA[Increments the named metric by the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue incremental value |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="update" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Updates the table of buffered data which is to be sent periodically. |
| If the tag values match an existing row, that row is updated; |
| otherwise, a new row is added.]]> |
| </doc> |
| </method> |
| <method name="remove" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Removes, from the buffered data table, all rows having tags |
| that equal the tags that have been set on this record. For example, |
| if there are no tags on this record, all rows for this record name |
| would be removed. Or, if there is a single tag on this record, then |
| just rows containing a tag with the same name and value would be removed.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A named and optionally tagged set of records to be sent to the metrics |
| system. <p/> |
| |
| A record name identifies the kind of data to be reported. For example, a |
| program reporting statistics relating to the disks on a computer might use |
| a record name "diskStats".<p/> |
| |
| A record has zero or more <i>tags</i>. A tag has a name and a value. To |
| continue the example, the "diskStats" record might use a tag named |
| "diskName" to identify a particular disk. Sometimes it is useful to have |
| more than one tag, so there might also be a "diskType" with value "ide" or |
| "scsi" or whatever.<p/> |
| |
| A record also has zero or more <i>metrics</i>. These are the named |
| values that are to be reported to the metrics system. In the "diskStats" |
| example, possible metric names would be "diskPercentFull", "diskPercentBusy", |
| "kbReadPerSecond", etc.<p/> |
| |
| The general procedure for using a MetricsRecord is to fill in its tag and |
| metric values, and then call <code>update()</code> to pass the record to the |
| client library. |
| Metric data is not immediately sent to the metrics system |
| each time that <code>update()</code> is called. |
| An internal table is maintained, identified by the record name. This |
| table has columns |
| corresponding to the tag and the metric names, and rows |
| corresponding to each unique set of tag values. An update |
| either modifies an existing row in the table, or adds a new row with a set of |
| tag values that are different from all the other rows. Note that if there |
| are no tags, then there can be at most one row in the table. <p/> |
| |
| Once a row is added to the table, its data will be sent to the metrics system |
| on every timer period, whether or not it has been updated since the previous |
| timer period. If this is inappropriate, for example if metrics were being |
| reported by some transient object in an application, the <code>remove()</code> |
| method can be used to remove the row and thus stop the data from being |
| sent.<p/> |
| |
| Note that the <code>update()</code> method is atomic. This means that it is |
| safe for different threads to be updating the same metric. More precisely, |
| it is OK for different threads to call <code>update()</code> on MetricsRecord instances |
| with the same set of tag names and tag values. Different threads should |
| <b>not</b> use the same MetricsRecord instance at the same time.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.metrics.MetricsRecord --> |
| <!-- start class org.apache.hadoop.metrics.MetricsUtil --> |
| <class name="MetricsUtil" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getContext" return="org.apache.hadoop.metrics.MetricsContext" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="contextName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Utility method to return the named context. |
| If the desired context cannot be created for any reason, the exception |
| is logged, and a null context is returned.]]> |
| </doc> |
| </method> |
| <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/> |
| <param name="recordName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Utility method to create and return new metrics record instance within the |
| given context. This record is tagged with the host name. |
| |
| @param context the context |
| @param recordName name of the record |
| @return newly created metrics record]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Utility class to simplify creation and reporting of hadoop metrics. |
| |
| For examples of usage, see {@link org.apache.hadoop.dfs.DataNode}. |
| @see org.apache.hadoop.metrics.MetricsRecord |
| @see org.apache.hadoop.metrics.MetricsContext |
| @see org.apache.hadoop.metrics.ContextFactory]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.MetricsUtil --> |
| <!-- start interface org.apache.hadoop.metrics.Updater --> |
| <interface name="Updater" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="doUpdates" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/> |
| <doc> |
| <![CDATA[Timer-based call-back from the metric library.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Call-back interface. See <code>MetricsContext.registerUpdater()</code>.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.metrics.Updater --> |
| <doc> |
| <![CDATA[This package defines an API for reporting performance metric information. |
| <p/> |
| The API is abstract so that it can be implemented on top of |
| a variety of metrics client libraries. The choice of |
| client library is a configuration option, and different |
| modules within the same application can use |
| different metrics implementation libraries. |
| <p/> |
| Sub-packages: |
| <dl> |
| <dt><code>org.apache.hadoop.metrics.spi</code></dt> |
| <dd>The abstract Server Provider Interface package. Those wishing to |
| integrate the metrics API with a particular metrics client library should |
| extend this package.</dd> |
| |
| <dt><code>org.apache.hadoop.metrics.file</code></dt> |
| <dd>An implementation package which writes the metric data to |
| a file, or sends it to the standard output stream.</dd> |
| |
| <dt> <code>org.apache.hadoop.metrics.ganglia</code></dt> |
| <dd>An implementation package which sends metric data to |
| <a href="http://ganglia.sourceforge.net/">Ganglia</a>.</dd> |
| </dl> |
| |
| <h3>Introduction to the Metrics API</h3> |
| |
| Here is a simple example of how to use this package to report a single |
| metric value: |
| <pre> |
| private ContextFactory contextFactory = ContextFactory.getFactory(); |
| |
| void reportMyMetric(float myMetric) { |
| MetricsContext myContext = contextFactory.getContext("myContext"); |
| MetricsRecord myRecord = myContext.getRecord("myRecord"); |
| myRecord.setMetric("myMetric", myMetric); |
| myRecord.update(); |
| } |
| </pre> |
| |
| In this example there are three names: |
| <dl> |
| <dt><i>myContext</i></dt> |
| <dd>The context name will typically identify either the application, or else a |
| module within an application or library.</dd> |
| |
| <dt><i>myRecord</i></dt> |
| <dd>The record name generally identifies some entity for which a set of |
| metrics are to be reported. For example, you could have a record named |
| "cacheStats" for reporting a number of statistics relating to the usage of |
| some cache in your application.</dd> |
| |
| <dt><i>myMetric</i></dt> |
| <dd>This identifies a particular metric. For example, you might have metrics |
| named "cache_hits" and "cache_misses". |
| </dd> |
| </dl> |
| |
| <h3>Tags</h3> |
| |
| In some cases it is useful to have multiple records with the same name. For |
| example, suppose that you want to report statistics about each disk on a computer. |
| In this case, the record name would be something like "diskStats", but you also |
| need to identify the disk which is done by adding a <i>tag</i> to the record. |
| The code could look something like this: |
| <pre> |
| private MetricsRecord diskStats = |
| contextFactory.getContext("myContext").getRecord("diskStats"); |
| |
| void reportDiskMetrics(String diskName, float diskBusy, float diskUsed) { |
| diskStats.setTag("diskName", diskName); |
| diskStats.setMetric("diskBusy", diskBusy); |
| diskStats.setMetric("diskUsed", diskUsed); |
| diskStats.update(); |
| } |
| </pre> |
| |
| <h3>Buffering and Callbacks</h3> |
| |
| Data is not sent immediately to the metrics system when |
| <code>MetricsRecord.update()</code> is called. Instead it is stored in an |
| internal table, and the contents of the table are sent periodically. |
| This can be important for two reasons: |
| <ol> |
| <li>It means that a programmer is free to put calls to this API in an |
| inner loop, since updates can be very frequent without slowing down |
| the application significantly.</li> |
| <li>Some implementations can gain efficiency by combining many metrics |
| into a single UDP message.</li> |
| </ol> |
| |
| The API provides a timer-based callback via the |
| <code>registerUpdater()</code> method. The benefit of this |
| versus using <code>java.util.Timer</code> is that the callbacks will be done |
| immediately before sending the data, making the data as current as possible. |
| |
| <h3>Configuration</h3> |
| |
| It is possible to programmatically examine and modify configuration data |
| before creating a context, like this: |
| <pre> |
| ContextFactory factory = ContextFactory.getFactory(); |
| ... examine and/or modify factory attributes ... |
| MetricsContext context = factory.getContext("myContext"); |
| </pre> |
 The factory attributes can be examined and modified using the following
 <code>ContextFactory</code> methods:
| <ul> |
| <li><code>Object getAttribute(String attributeName)</code></li> |
| <li><code>String[] getAttributeNames()</code></li> |
| <li><code>void setAttribute(String name, Object value)</code></li> |
 <li><code>void removeAttribute(String attributeName)</code></li>
| </ul> |
| |
| <p/> |
| <code>ContextFactory.getFactory()</code> initializes the factory attributes by |
| reading the properties file <code>hadoop-metrics.properties</code> if it exists |
| on the class path. |
| |
| <p/> |
| A factory attribute named: |
| <pre> |
| <i>contextName</i>.class |
| </pre> |
 should have as its value the fully qualified name of the class to be
 instantiated by a call of the <code>ContextFactory</code> method
 <code>getContext(<i>contextName</i>)</code>.  If this factory attribute is not
| specified, the default is to instantiate |
| <code>org.apache.hadoop.metrics.file.FileContext</code>. |
| |
| <p/> |
| Other factory attributes are specific to a particular implementation of this |
| API and are documented elsewhere. For example, configuration attributes for |
| the file and Ganglia implementations can be found in the javadoc for |
| their respective packages.]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.metrics.file"> |
| <!-- start class org.apache.hadoop.metrics.file.FileContext --> |
| <class name="FileContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="FileContext" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of FileContext]]> |
| </doc> |
| </constructor> |
| <method name="init" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="contextName" type="java.lang.String"/> |
| <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/> |
| </method> |
| <method name="getFileName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the configured file name, or null.]]> |
| </doc> |
| </method> |
| <method name="startMonitoring" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Starts or restarts monitoring, by opening in append-mode, the |
| file specified by the <code>fileName</code> attribute, |
| if specified. Otherwise the data will be written to standard |
| output.]]> |
| </doc> |
| </method> |
| <method name="stopMonitoring" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Stops monitoring, closing the file. |
| @see #close()]]> |
| </doc> |
| </method> |
| <method name="emitRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="contextName" type="java.lang.String"/> |
| <param name="recordName" type="java.lang.String"/> |
| <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/> |
| <doc> |
| <![CDATA[Emits a metrics record to a file.]]> |
| </doc> |
| </method> |
| <method name="flush" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Flushes the output writer, forcing updates to disk.]]> |
| </doc> |
| </method> |
| <field name="FILE_NAME_PROPERTY" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="PERIOD_PROPERTY" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Metrics context for writing metrics to a file.<p/> |
| |
| This class is configured by setting ContextFactory attributes which in turn |
| are usually configured through a properties file. All the attributes are |
| prefixed by the contextName. For example, the properties file might contain: |
| <pre> |
| myContextName.fileName=/tmp/metrics.log |
| myContextName.period=5 |
| </pre>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.file.FileContext --> |
| <doc> |
| <![CDATA[Implementation of the metrics package that writes the metrics to a file. |
| Programmers should not normally need to use this package directly. Instead |
 they should use org.apache.hadoop.metrics.
| |
| <p/> |
| These are the implementation specific factory attributes |
| (See ContextFactory.getFactory()): |
| |
| <dl> |
| <dt><i>contextName</i>.fileName</dt> |
| <dd>The path of the file to which metrics in context <i>contextName</i> |
| are to be appended. If this attribute is not specified, the metrics |
| are written to standard output by default.</dd> |
| |
| <dt><i>contextName</i>.period</dt> |
| <dd>The period in seconds on which the metric data is written to the |
| file.</dd> |
| |
| </dl>]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.metrics.ganglia"> |
| <!-- start class org.apache.hadoop.metrics.ganglia.GangliaContext --> |
| <class name="GangliaContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="GangliaContext" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of GangliaContext]]> |
| </doc> |
| </constructor> |
| <method name="init" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="contextName" type="java.lang.String"/> |
| <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/> |
| </method> |
| <method name="emitRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="contextName" type="java.lang.String"/> |
| <param name="recordName" type="java.lang.String"/> |
| <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[Context for sending metrics to Ganglia.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.ganglia.GangliaContext --> |
| <doc> |
| <![CDATA[<!-- |
| * Licensed to the Apache Software Foundation (ASF) under one |
| * or more contributor license agreements. See the NOTICE file |
| * distributed with this work for additional information |
| * regarding copyright ownership. The ASF licenses this file |
| * to you under the Apache License, Version 2.0 (the |
| * "License"); you may not use this file except in compliance |
| * with the License. You may obtain a copy of the License at |
| * |
| * http://www.apache.org/licenses/LICENSE-2.0 |
| * |
| * Unless required by applicable law or agreed to in writing, software |
| * distributed under the License is distributed on an "AS IS" BASIS, |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| * See the License for the specific language governing permissions and |
| * limitations under the License. |
| --> |
| |
| Implementation of the metrics package that sends metric data to |
| <a href="http://ganglia.sourceforge.net/">Ganglia</a>. |
| Programmers should not normally need to use this package directly. Instead |
 they should use org.apache.hadoop.metrics.
| |
| <p/> |
| These are the implementation specific factory attributes |
| (See ContextFactory.getFactory()): |
| |
| <dl> |
| <dt><i>contextName</i>.servers</dt> |
| <dd>Space and/or comma separated sequence of servers to which UDP |
| messages should be sent.</dd> |
| |
| <dt><i>contextName</i>.period</dt> |
| <dd>The period in seconds on which the metric data is sent to the |
| server(s).</dd> |
| |
| <dt><i>contextName</i>.units.<i>recordName</i>.<i>metricName</i></dt> |
| <dd>The units for the specified metric in the specified record.</dd> |
| |
| <dt><i>contextName</i>.slope.<i>recordName</i>.<i>metricName</i></dt> |
| <dd>The slope for the specified metric in the specified record.</dd> |
| |
| <dt><i>contextName</i>.tmax.<i>recordName</i>.<i>metricName</i></dt> |
| <dd>The tmax for the specified metric in the specified record.</dd> |
| |
| <dt><i>contextName</i>.dmax.<i>recordName</i>.<i>metricName</i></dt> |
| <dd>The dmax for the specified metric in the specified record.</dd> |
| |
| </dl>]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.metrics.jvm"> |
| <!-- start class org.apache.hadoop.metrics.jvm.EventCounter --> |
| <class name="EventCounter" extends="org.apache.log4j.AppenderSkeleton" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="EventCounter" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getFatal" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getError" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getWarn" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getInfo" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="append" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="event" type="org.apache.log4j.spi.LoggingEvent"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="requiresLayout" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
      <![CDATA[A log4J Appender that simply counts logging events in four levels:
 fatal, error, warn and info.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.jvm.EventCounter --> |
| <!-- start class org.apache.hadoop.metrics.jvm.JvmMetrics --> |
| <class name="JvmMetrics" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.metrics.Updater"/> |
| <method name="init" return="org.apache.hadoop.metrics.jvm.JvmMetrics" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="processName" type="java.lang.String"/> |
| <param name="sessionId" type="java.lang.String"/> |
| </method> |
| <method name="doUpdates" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="context" type="org.apache.hadoop.metrics.MetricsContext"/> |
| <doc> |
| <![CDATA[This will be called periodically (with the period being configuration |
| dependent).]]> |
| </doc> |
| </method> |
| <doc> |
      <![CDATA[Singleton class which reports Java Virtual Machine metrics to the metrics API.
| Any application can create an instance of this class in order to emit |
| Java VM metrics.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.jvm.JvmMetrics --> |
| </package> |
| <package name="org.apache.hadoop.metrics.spi"> |
| <!-- start class org.apache.hadoop.metrics.spi.AbstractMetricsContext --> |
| <class name="AbstractMetricsContext" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.metrics.MetricsContext"/> |
| <constructor name="AbstractMetricsContext" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of AbstractMetricsContext]]> |
| </doc> |
| </constructor> |
| <method name="init" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="contextName" type="java.lang.String"/> |
| <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/> |
| <doc> |
| <![CDATA[Initializes the context.]]> |
| </doc> |
| </method> |
| <method name="getAttribute" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="attributeName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Convenience method for subclasses to access factory attributes.]]> |
| </doc> |
| </method> |
| <method name="getAttributeTable" return="java.util.Map<java.lang.String, java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="tableName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Returns an attribute-value map derived from the factory attributes |
| by finding all factory attributes that begin with |
| <i>contextName</i>.<i>tableName</i>. The returned map consists of |
| those attributes with the contextName and tableName stripped off.]]> |
| </doc> |
| </method> |
| <method name="getContextName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the context name.]]> |
| </doc> |
| </method> |
| <method name="getContextFactory" return="org.apache.hadoop.metrics.ContextFactory" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the factory by which this context was created.]]> |
| </doc> |
| </method> |
| <method name="startMonitoring" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Starts or restarts monitoring, the emitting of metrics records.]]> |
| </doc> |
| </method> |
| <method name="stopMonitoring" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Stops monitoring. This does not free buffered data. |
| @see #close()]]> |
| </doc> |
| </method> |
| <method name="isMonitoring" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns true if monitoring is currently in progress.]]> |
| </doc> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Stops monitoring and frees buffered data, returning this |
| object to its initial state.]]> |
| </doc> |
| </method> |
| <method name="createRecord" return="org.apache.hadoop.metrics.MetricsRecord" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="recordName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Creates a new AbstractMetricsRecord instance with the given <code>recordName</code>. |
| Throws an exception if the metrics implementation is configured with a fixed |
| set of record names and <code>recordName</code> is not in that set. |
| |
| @param recordName the name of the record |
| @throws MetricsException if recordName conflicts with configuration data]]> |
| </doc> |
| </method> |
| <method name="newRecord" return="org.apache.hadoop.metrics.spi.MetricsRecordImpl" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="recordName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Subclasses should override this if they subclass MetricsRecordImpl. |
| @param recordName the name of the record |
| @return newly created instance of MetricsRecordImpl or subclass]]> |
| </doc> |
| </method> |
| <method name="registerUpdater" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="updater" type="org.apache.hadoop.metrics.Updater"/> |
| <doc> |
| <![CDATA[Registers a callback to be called at time intervals determined by |
| the configuration. |
| |
| @param updater object to be run periodically; it should update |
| some metrics records]]> |
| </doc> |
| </method> |
| <method name="unregisterUpdater" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="updater" type="org.apache.hadoop.metrics.Updater"/> |
| <doc> |
| <![CDATA[Removes a callback, if it exists. |
| |
| @param updater object to be removed from the callback list]]> |
| </doc> |
| </method> |
| <method name="emitRecord" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="contextName" type="java.lang.String"/> |
| <param name="recordName" type="java.lang.String"/> |
| <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Sends a record to the metrics system.]]> |
| </doc> |
| </method> |
| <method name="flush" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Called each period after all records have been emitted, this method does nothing. |
| Subclasses may override it in order to perform some kind of flush.]]> |
| </doc> |
| </method> |
| <method name="update" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/> |
| <doc> |
| <![CDATA[Called by MetricsRecordImpl.update(). Creates or updates a row in |
| the internal table of metric data.]]> |
| </doc> |
| </method> |
| <method name="remove" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/> |
| <doc> |
| <![CDATA[Called by MetricsRecordImpl.remove(). Removes all matching rows in |
| the internal table of metric data. A row matches if it has the same |
| tag names and values as record, but it may also have additional |
| tags.]]> |
| </doc> |
| </method> |
| <method name="getPeriod" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the timer period.]]> |
| </doc> |
| </method> |
| <method name="setPeriod" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="period" type="int"/> |
| <doc> |
| <![CDATA[Sets the timer period]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[The main class of the Service Provider Interface. This class should be |
| extended in order to integrate the Metrics API with a specific metrics |
| client library. <p/> |
| |
| This class implements the internal table of metric data, and the timer |
| on which data is to be sent to the metrics system. Subclasses must |
| override the abstract <code>emitRecord</code> method in order to transmit |
| the data. <p/>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.spi.AbstractMetricsContext --> |
| <!-- start class org.apache.hadoop.metrics.spi.MetricsRecordImpl --> |
| <class name="MetricsRecordImpl" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.metrics.MetricsRecord"/> |
| <constructor name="MetricsRecordImpl" type="java.lang.String, org.apache.hadoop.metrics.spi.AbstractMetricsContext" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of FileRecord]]> |
| </doc> |
| </constructor> |
| <method name="getRecordName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the record name. |
| |
| @return the record name]]> |
| </doc> |
| </method> |
| <method name="setTag" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tagName" type="java.lang.String"/> |
| <param name="tagValue" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Sets the named tag to the specified value. |
| |
| @param tagName name of the tag |
| @param tagValue new value of the tag |
| @throws MetricsException if the tagName conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="setTag" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tagName" type="java.lang.String"/> |
| <param name="tagValue" type="int"/> |
| <doc> |
| <![CDATA[Sets the named tag to the specified value. |
| |
| @param tagName name of the tag |
| @param tagValue new value of the tag |
| @throws MetricsException if the tagName conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="setTag" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tagName" type="java.lang.String"/> |
| <param name="tagValue" type="short"/> |
| <doc> |
| <![CDATA[Sets the named tag to the specified value. |
| |
| @param tagName name of the tag |
| @param tagValue new value of the tag |
| @throws MetricsException if the tagName conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="setTag" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tagName" type="java.lang.String"/> |
| <param name="tagValue" type="byte"/> |
| <doc> |
| <![CDATA[Sets the named tag to the specified value. |
| |
| @param tagName name of the tag |
| @param tagValue new value of the tag |
| @throws MetricsException if the tagName conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="removeTag" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tagName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Removes any tag of the specified name.]]> |
| </doc> |
| </method> |
| <method name="setMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="int"/> |
| <doc> |
| <![CDATA[Sets the named metric to the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue new value of the metric |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="setMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="short"/> |
| <doc> |
| <![CDATA[Sets the named metric to the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue new value of the metric |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="setMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="byte"/> |
| <doc> |
| <![CDATA[Sets the named metric to the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue new value of the metric |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="setMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="float"/> |
| <doc> |
| <![CDATA[Sets the named metric to the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue new value of the metric |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="incrMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="int"/> |
| <doc> |
| <![CDATA[Increments the named metric by the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue incremental value |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="incrMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="short"/> |
| <doc> |
| <![CDATA[Increments the named metric by the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue incremental value |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="incrMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="byte"/> |
| <doc> |
| <![CDATA[Increments the named metric by the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue incremental value |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="incrMetric" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="metricName" type="java.lang.String"/> |
| <param name="metricValue" type="float"/> |
| <doc> |
| <![CDATA[Increments the named metric by the specified value. |
| |
| @param metricName name of the metric |
| @param metricValue incremental value |
| @throws MetricsException if the metricName or the type of the metricValue |
| conflicts with the configuration]]> |
| </doc> |
| </method> |
| <method name="update" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Updates the table of buffered data which is to be sent periodically. |
| If the tag values match an existing row, that row is updated; |
| otherwise, a new row is added.]]> |
| </doc> |
| </method> |
| <method name="remove" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Removes the row, if it exists, in the buffered data table having tags |
| that equal the tags that have been set on this record.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[An implementation of MetricsRecord. Keeps a back-pointer to the context |
| from which it was created, and delegates back to it on <code>update</code> |
| and <code>remove()</code>.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.spi.MetricsRecordImpl --> |
| <!-- start class org.apache.hadoop.metrics.spi.MetricValue --> |
| <class name="MetricValue" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MetricValue" type="java.lang.Number, boolean" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of MetricValue]]> |
| </doc> |
| </constructor> |
| <method name="isIncrement" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="isAbsolute" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getNumber" return="java.lang.Number" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <field name="ABSOLUTE" type="boolean" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="INCREMENT" type="boolean" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[A Number that is either an absolute or an incremental amount.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.spi.MetricValue --> |
| <!-- start class org.apache.hadoop.metrics.spi.NullContext --> |
| <class name="NullContext" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="NullContext" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of NullContext]]> |
| </doc> |
| </constructor> |
| <method name="startMonitoring" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Do-nothing version of startMonitoring]]> |
| </doc> |
| </method> |
| <method name="emitRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="contextName" type="java.lang.String"/> |
| <param name="recordName" type="java.lang.String"/> |
| <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/> |
| <doc> |
| <![CDATA[Do-nothing version of emitRecord]]> |
| </doc> |
| </method> |
| <method name="update" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/> |
| <doc> |
| <![CDATA[Do-nothing version of update]]> |
| </doc> |
| </method> |
| <method name="remove" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/> |
| <doc> |
| <![CDATA[Do-nothing version of remove]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Null metrics context: a metrics context which does nothing. Used as the |
| default context, so that no performance data is emitted if no configuration |
| data is found.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.spi.NullContext --> |
| <!-- start class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread --> |
| <class name="NullContextWithUpdateThread" extends="org.apache.hadoop.metrics.spi.AbstractMetricsContext" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="NullContextWithUpdateThread" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of NullContextWithUpdateThread]]> |
| </doc> |
| </constructor> |
| <method name="init" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="contextName" type="java.lang.String"/> |
| <param name="factory" type="org.apache.hadoop.metrics.ContextFactory"/> |
| </method> |
| <method name="emitRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="contextName" type="java.lang.String"/> |
| <param name="recordName" type="java.lang.String"/> |
| <param name="outRec" type="org.apache.hadoop.metrics.spi.OutputRecord"/> |
| <doc> |
| <![CDATA[Do-nothing version of emitRecord]]> |
| </doc> |
| </method> |
| <method name="update" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/> |
| <doc> |
| <![CDATA[Do-nothing version of update]]> |
| </doc> |
| </method> |
| <method name="remove" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="record" type="org.apache.hadoop.metrics.spi.MetricsRecordImpl"/> |
| <doc> |
| <![CDATA[Do-nothing version of remove]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A null context which has a thread calling |
| periodically when monitoring is started. This keeps the data sampled |
| correctly. |
| In all other respects, this is like the NULL context: No data is emitted. |
This is suitable for monitoring systems like JMX which read the metrics
when someone reads the data from JMX.
| |
The default impl of start and stop monitoring
in the AbstractMetricsContext is good enough.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.spi.NullContextWithUpdateThread --> |
| <!-- start class org.apache.hadoop.metrics.spi.OutputRecord --> |
| <class name="OutputRecord" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getTagNames" return="java.util.Set<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the set of tag names]]> |
| </doc> |
| </method> |
| <method name="getTag" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
<![CDATA[Returns a tag object which can be a String, Integer, Short or Byte.
| |
| @return the tag value, or null if there is no such tag]]> |
| </doc> |
| </method> |
| <method name="getMetricNames" return="java.util.Set<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the set of metric names.]]> |
| </doc> |
| </method> |
| <method name="getMetric" return="java.lang.Number" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Returns the metric object which can be a Float, Integer, Short or Byte.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Represents a record of metric data to be sent to a metrics system.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.spi.OutputRecord --> |
| <!-- start class org.apache.hadoop.metrics.spi.Util --> |
| <class name="Util" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="parse" return="java.util.List<java.net.InetSocketAddress>" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="specs" type="java.lang.String"/> |
| <param name="defaultPort" type="int"/> |
| <doc> |
| <![CDATA[Parses a space and/or comma separated sequence of server specifications |
| of the form <i>hostname</i> or <i>hostname:port</i>. If |
| the specs string is null, defaults to localhost:defaultPort. |
| |
| @return a list of InetSocketAddress objects.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Static utility methods]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.spi.Util --> |
| <doc> |
| <![CDATA[The Service Provider Interface for the Metrics API. This package provides |
| an interface allowing a variety of metrics reporting implementations to be |
| plugged in to the Metrics API. Examples of such implementations can be found |
| in the packages <code>org.apache.hadoop.metrics.file</code> and |
| <code>org.apache.hadoop.metrics.ganglia</code>.<p/> |
| |
| Plugging in an implementation involves writing a concrete subclass of |
| <code>AbstractMetricsContext</code>. The subclass should get its |
| configuration information using the <code>getAttribute(<i>attributeName</i>)</code> |
| method.]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.metrics.util"> |
| <!-- start class org.apache.hadoop.metrics.util.MBeanUtil --> |
| <class name="MBeanUtil" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MBeanUtil" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="registerMBean" return="javax.management.ObjectName" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="serviceName" type="java.lang.String"/> |
| <param name="nameName" type="java.lang.String"/> |
| <param name="theMbean" type="java.lang.Object"/> |
| <doc> |
<![CDATA[Register the mbean using our standard MBeanName format
| "hadoop.dfs:service=<serviceName>,name=<nameName>" |
| Where the <serviceName> and <nameName> are the supplied parameters |
| |
| @param serviceName |
| @param nameName |
| @param theMbean - the MBean to register |
@return the name used to register the MBean]]>
| </doc> |
| </method> |
| <method name="unregisterMBean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="mbeanName" type="javax.management.ObjectName"/> |
| </method> |
| <doc> |
| <![CDATA[This util class provides a method to register an MBean using |
| our standard naming convention as described in the doc |
for {@link #registerMBean(String, String, Object)}]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.util.MBeanUtil --> |
| <!-- start class org.apache.hadoop.metrics.util.MetricsIntValue --> |
| <class name="MetricsIntValue" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MetricsIntValue" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor - create a new metric |
| @param nam the name of the metrics to be used to publish the metric]]> |
| </doc> |
| </constructor> |
| <method name="set" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newValue" type="int"/> |
| <doc> |
| <![CDATA[Set the value |
| @param newValue]]> |
| </doc> |
| </method> |
| <method name="get" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get value |
| @return the value last set]]> |
| </doc> |
| </method> |
| <method name="pushMetric" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/> |
| <doc> |
| <![CDATA[Push the metric to the mr. |
| The metric is pushed only if it was updated since last push |
| |
| Note this does NOT push to JMX |
| (JMX gets the info via {@link #get()} |
| |
| @param mr]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[The MetricsIntValue class is for a metric that is not time varied |
| but changes only when it is set. |
| Each time its value is set, it is published only *once* at the next update |
| call.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.util.MetricsIntValue --> |
| <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt --> |
| <class name="MetricsTimeVaryingInt" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MetricsTimeVaryingInt" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor - create a new metric |
| @param nam the name of the metrics to be used to publish the metric]]> |
| </doc> |
| </constructor> |
| <method name="inc" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="incr" type="int"/> |
| <doc> |
<![CDATA[Inc metrics for incr value
| @param incr - number of operations]]> |
| </doc> |
| </method> |
| <method name="inc" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Inc metrics by one]]> |
| </doc> |
| </method> |
| <method name="pushMetric" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/> |
| <doc> |
| <![CDATA[Push the delta metrics to the mr. |
| The delta is since the last push/interval. |
| |
| Note this does NOT push to JMX |
| (JMX gets the info via {@link #previousIntervalValue} |
| |
| @param mr]]> |
| </doc> |
| </method> |
| <method name="getPreviousIntervalValue" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The Value at the Previous interval |
| @return prev interval value]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[The MetricsTimeVaryingInt class is for a metric that naturally |
| varies over time (e.g. number of files created). |
The metric is published at interval heart beat (the interval
| is set in the metrics config file). |
| Note if one wants a time associated with the metric then use |
| @see org.apache.hadoop.metrics.util.MetricsTimeVaryingRate]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingInt --> |
| <!-- start class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate --> |
| <class name="MetricsTimeVaryingRate" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MetricsTimeVaryingRate" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor - create a new metric |
| @param n the name of the metrics to be used to publish the metric]]> |
| </doc> |
| </constructor> |
| <method name="inc" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="numOps" type="int"/> |
| <param name="time" type="long"/> |
| <doc> |
| <![CDATA[Increment the metrics for numOps operations |
| @param numOps - number of operations |
| @param time - time for numOps operations]]> |
| </doc> |
| </method> |
| <method name="inc" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="time" type="long"/> |
| <doc> |
| <![CDATA[Increment the metrics for one operation |
| @param time for one operation]]> |
| </doc> |
| </method> |
| <method name="pushMetric" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="mr" type="org.apache.hadoop.metrics.MetricsRecord"/> |
| <doc> |
| <![CDATA[Push the delta metrics to the mr. |
| The delta is since the last push/interval. |
| |
| Note this does NOT push to JMX |
| (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and |
| {@link #getPreviousIntervalNumOps()} |
| |
| @param mr]]> |
| </doc> |
| </method> |
| <method name="getPreviousIntervalNumOps" return="int" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The number of operations in the previous interval |
| @return - ops in prev interval]]> |
| </doc> |
| </method> |
| <method name="getPreviousIntervalAverageTime" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The average rate of an operation in the previous interval |
| @return - the average rate.]]> |
| </doc> |
| </method> |
| <method name="getMinTime" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The min time for a single operation since the last reset |
| {@link #resetMinMax()} |
| @return min time for an operation]]> |
| </doc> |
| </method> |
| <method name="getMaxTime" return="long" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The max time for a single operation since the last reset |
| {@link #resetMinMax()} |
| @return max time for an operation]]> |
| </doc> |
| </method> |
| <method name="resetMinMax" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Reset the min max values]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[The MetricsTimeVaryingRate class is for a rate based metric that |
| naturally varies over time (e.g. time taken to create a file). |
| The rate is averaged at each interval heart beat (the interval |
| is set in the metrics config file). |
| This class also keeps track of the min and max rates along with |
| a method to reset the min-max.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.metrics.util.MetricsTimeVaryingRate --> |
| </package> |
| <package name="org.apache.hadoop.net"> |
| <!-- start class org.apache.hadoop.net.DNS --> |
| <class name="DNS" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DNS" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="reverseDns" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="hostIp" type="java.net.InetAddress"/> |
| <param name="ns" type="java.lang.String"/> |
| <exception name="NamingException" type="javax.naming.NamingException"/> |
| <doc> |
| <![CDATA[Returns the hostname associated with the specified IP address by the |
| provided nameserver. |
| |
| @param hostIp |
| The address to reverse lookup |
| @param ns |
| The host name of a reachable DNS server |
| @return The host name associated with the provided IP |
| @throws NamingException |
| If a NamingException is encountered]]> |
| </doc> |
| </method> |
| <method name="getIPs" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="strInterface" type="java.lang.String"/> |
| <exception name="UnknownHostException" type="java.net.UnknownHostException"/> |
| <doc> |
| <![CDATA[Returns all the IPs associated with the provided interface, if any, in |
| textual form. |
| |
| @param strInterface |
| The name of the network interface to query (e.g. eth0) |
| @return A string vector of all the IPs associated with the provided |
| interface |
| @throws UnknownHostException |
| If an UnknownHostException is encountered in querying the |
| default interface]]> |
| </doc> |
| </method> |
| <method name="getDefaultIP" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="strInterface" type="java.lang.String"/> |
| <exception name="UnknownHostException" type="java.net.UnknownHostException"/> |
| <doc> |
| <![CDATA[Returns the first available IP address associated with the provided |
| network interface |
| |
| @param strInterface |
| The name of the network interface to query (e.g. eth0) |
| @return The IP address in text form |
| @throws UnknownHostException |
| If one is encountered in querying the default interface]]> |
| </doc> |
| </method> |
| <method name="getHosts" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="strInterface" type="java.lang.String"/> |
| <param name="nameserver" type="java.lang.String"/> |
| <exception name="UnknownHostException" type="java.net.UnknownHostException"/> |
| <doc> |
| <![CDATA[Returns all the host names associated by the provided nameserver with the |
| address bound to the specified network interface |
| |
| @param strInterface |
| The name of the network interface to query (e.g. eth0) |
| @param nameserver |
| The DNS host name |
| @return A string vector of all host names associated with the IPs tied to |
| the specified interface |
| @throws UnknownHostException]]> |
| </doc> |
| </method> |
| <method name="getHosts" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="strInterface" type="java.lang.String"/> |
| <exception name="UnknownHostException" type="java.net.UnknownHostException"/> |
| <doc> |
| <![CDATA[Returns all the host names associated by the default nameserver with the |
| address bound to the specified network interface |
| |
| @param strInterface |
| The name of the network interface to query (e.g. eth0) |
| @return The list of host names associated with IPs bound to the network |
| interface |
| @throws UnknownHostException |
If one is encountered while querying the default interface]]>
| </doc> |
| </method> |
| <method name="getDefaultHost" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="strInterface" type="java.lang.String"/> |
| <param name="nameserver" type="java.lang.String"/> |
| <exception name="UnknownHostException" type="java.net.UnknownHostException"/> |
| <doc> |
| <![CDATA[Returns the default (first) host name associated by the provided |
| nameserver with the address bound to the specified network interface |
| |
| @param strInterface |
| The name of the network interface to query (e.g. eth0) |
| @param nameserver |
| The DNS host name |
| @return The default host names associated with IPs bound to the network |
| interface |
| @throws UnknownHostException |
If one is encountered while querying the default interface]]>
| </doc> |
| </method> |
| <method name="getDefaultHost" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="strInterface" type="java.lang.String"/> |
| <exception name="UnknownHostException" type="java.net.UnknownHostException"/> |
| <doc> |
| <![CDATA[Returns the default (first) host name associated by the default |
| nameserver with the address bound to the specified network interface |
| |
| @param strInterface |
| The name of the network interface to query (e.g. eth0) |
| @return The default host name associated with IPs bound to the network |
| interface |
| @throws UnknownHostException |
If one is encountered while querying the default interface]]>
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A class that provides direct and reverse lookup functionalities, allowing |
| the querying of specific network interfaces or nameservers.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.net.DNS --> |
| <!-- start interface org.apache.hadoop.net.DNSToSwitchMapping --> |
| <interface name="DNSToSwitchMapping" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="resolve" return="java.util.List<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="names" type="java.util.List<java.lang.String>"/> |
| <doc> |
| <![CDATA[Resolves a list of DNS-names/IP-addresses and returns back a list of |
| switch information (network paths). One-to-one correspondence must be |
| maintained between the elements in the lists. |
| Consider an element in the argument list - x.y.com. The switch information |
| that is returned must be a network path of the form /foo/rack, |
| where / is the root, and 'foo' is the switch where 'rack' is connected. |
| Note the hostname/ip-address is not part of the returned path. |
| The network topology of the cluster would determine the number of |
| components in the network path. |
| @param names |
| @return list of resolved network paths]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[An interface that should be implemented to allow pluggable |
| DNS-name/IP-address to RackID resolvers.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.net.DNSToSwitchMapping --> |
| <!-- start class org.apache.hadoop.net.NetUtils --> |
| <class name="NetUtils" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="NetUtils" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getSocketFactory" return="javax.net.SocketFactory" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="clazz" type="java.lang.Class<?>"/> |
| <doc> |
| <![CDATA[Get the socket factory for the given class according to its |
| configuration parameter |
| <tt>hadoop.rpc.socket.factory.class.<ClassName></tt>. When no |
| such parameter exists then fall back on the default socket factory as |
| configured by <tt>hadoop.rpc.socket.factory.class.default</tt>. If |
| this default socket factory is not configured, then fall back on the JVM |
| default socket factory. |
| |
| @param conf the configuration |
| @param clazz the class (usually a {@link VersionedProtocol}) |
| @return a socket factory]]> |
| </doc> |
| </method> |
| <method name="getDefaultSocketFactory" return="javax.net.SocketFactory" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Get the default socket factory as specified by the configuration |
| parameter <tt>hadoop.rpc.socket.factory.class.default</tt> |
| |
| @param conf the configuration |
| @return the default socket factory as specified in the configuration or |
| the JVM default socket factory if the configuration does not |
| contain a default socket factory property.]]> |
| </doc> |
| </method> |
| <method name="getSocketFactoryFromProperty" return="javax.net.SocketFactory" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="propValue" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get the socket factory corresponding to the given proxy URI. If the |
| given proxy URI corresponds to an absence of configuration parameter, |
| returns null. If the URI is malformed raises an exception. |
| |
| @param propValue the property which is the class name of the |
| SocketFactory to instantiate; assumed non null and non empty. |
| @return a socket factory as defined in the property value.]]> |
| </doc> |
| </method> |
| <method name="createSocketAddr" return="java.net.InetSocketAddress" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="target" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Util method to build socket addr from either: |
| <host>:<port> |
| <fs>://<host>:<port>/<path>]]> |
| </doc> |
| </method> |
| <method name="getServerAddress" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="oldBindAddressName" type="java.lang.String"/> |
| <param name="oldPortName" type="java.lang.String"/> |
| <param name="newBindAddressName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Handle the transition from pairs of attributes specifying a host and port |
| to a single colon separated one. |
| @param conf the configuration to check |
| @param oldBindAddressName the old address attribute name |
| @param oldPortName the old port attribute name |
| @param newBindAddressName the new combined name |
| @return the complete address from the configuration]]> |
| </doc> |
| </method> |
| <method name="addStaticResolution" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="host" type="java.lang.String"/> |
| <param name="resolvedName" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Adds a static resolution for host. This can be used for setting up |
| hostnames with names that are fake to point to a well known host. For e.g. |
| in some testcases we require to have daemons with different hostnames |
| running on the same machine. In order to create connections to these |
| daemons, one can set up mappings from those hostnames to "localhost". |
| {@link NetUtils#getStaticResolution(String)} can be used to query for |
| the actual hostname. |
| @param host |
| @param resolvedName]]> |
| </doc> |
| </method> |
| <method name="getStaticResolution" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="host" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Retrieves the resolved name for the passed host. The resolved name must |
| have been set earlier using |
| {@link NetUtils#addStaticResolution(String, String)} |
| @param host |
| @return the resolution]]> |
| </doc> |
| </method> |
| <method name="getAllStaticResolutions" return="java.util.List<java.lang.String[]>" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[This is used to get all the resolutions that were added using |
| {@link NetUtils#addStaticResolution(String, String)}. The return |
| value is a List each element of which contains an array of String |
| of the form String[0]=hostname, String[1]=resolved-hostname |
| @return the list of resolutions]]> |
| </doc> |
| </method> |
| <method name="getConnectAddress" return="java.net.InetSocketAddress" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="server" type="org.apache.hadoop.ipc.Server"/> |
| <doc> |
| <![CDATA[Returns InetSocketAddress that a client can use to |
| connect to the server. Server.getListenerAddress() is not correct when |
| the server binds to "0.0.0.0". This returns "127.0.0.1:port" when |
| the getListenerAddress() returns "0.0.0.0:port". |
| |
| @param server |
| @return socket address that a client can use to connect to the server.]]> |
| </doc> |
| </method> |
| <method name="getInputStream" return="java.io.InputStream" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="socket" type="java.net.Socket"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Same as getInputStream(socket, socket.getSoTimeout()).<br><br> |
| |
| From documentation for {@link #getInputStream(Socket, long)}:<br> |
| Returns InputStream for the socket. If the socket has an associated |
| SocketChannel then it returns a |
| {@link SocketInputStream} with the given timeout. If the socket does not |
| have a channel, {@link Socket#getInputStream()} is returned. In the latter |
| case, the timeout argument is ignored and the timeout set with |
| {@link Socket#setSoTimeout(int)} applies for reads.<br><br> |
| |
| Any socket created using socket factories returned by {@link NetUtils}, |
| must use this interface instead of {@link Socket#getInputStream()}. |
| |
| @see #getInputStream(Socket, long) |
| |
| @param socket |
| @return InputStream for reading from the socket. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getInputStream" return="java.io.InputStream" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="socket" type="java.net.Socket"/> |
| <param name="timeout" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns InputStream for the socket. If the socket has an associated |
| SocketChannel then it returns a |
| {@link SocketInputStream} with the given timeout. If the socket does not |
| have a channel, {@link Socket#getInputStream()} is returned. In the latter |
| case, the timeout argument is ignored and the timeout set with |
| {@link Socket#setSoTimeout(int)} applies for reads.<br><br> |
| |
| Any socket created using socket factories returned by {@link NetUtils}, |
| must use this interface instead of {@link Socket#getInputStream()}. |
| |
| @see Socket#getChannel() |
| |
| @param socket |
| @param timeout timeout in milliseconds. This may not always apply. zero |
| for waiting as long as necessary. |
| @return InputStream for reading from the socket. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getOutputStream" return="java.io.OutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="socket" type="java.net.Socket"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Same as getOutputStream(socket, 0). Timeout of zero implies write will |
| wait until data is available.<br><br> |
| |
| From documentation for {@link #getOutputStream(Socket, long)} : <br> |
| Returns OutputStream for the socket. If the socket has an associated |
| SocketChannel then it returns a |
| {@link SocketOutputStream} with the given timeout. If the socket does not |
| have a channel, {@link Socket#getOutputStream()} is returned. In the latter |
| case, the timeout argument is ignored and the write will wait until |
| data is available.<br><br> |
| |
| Any socket created using socket factories returned by {@link NetUtils}, |
| must use this interface instead of {@link Socket#getOutputStream()}. |
| |
| @see #getOutputStream(Socket, long) |
| |
| @param socket |
| @return OutputStream for writing to the socket. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="getOutputStream" return="java.io.OutputStream" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="socket" type="java.net.Socket"/> |
| <param name="timeout" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Returns OutputStream for the socket. If the socket has an associated |
| SocketChannel then it returns a |
| {@link SocketOutputStream} with the given timeout. If the socket does not |
| have a channel, {@link Socket#getOutputStream()} is returned. In the latter |
| case, the timeout argument is ignored and the write will wait until |
| data is available.<br><br> |
| |
| Any socket created using socket factories returned by {@link NetUtils}, |
| must use this interface instead of {@link Socket#getOutputStream()}. |
| |
| @see Socket#getChannel() |
| |
| @param socket |
| @param timeout timeout in milliseconds. This may not always apply. zero |
| for waiting as long as necessary. |
| @return OutputStream for writing to the socket. |
| @throws IOException]]> |
| </doc> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.net.NetUtils --> |
| <!-- start class org.apache.hadoop.net.NetworkTopology --> |
| <class name="NetworkTopology" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="NetworkTopology" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="add" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="node" type="org.apache.hadoop.net.Node"/> |
| <doc> |
| <![CDATA[Add a leaf node |
| Update node counter & rack counter if necessary |
| @param node |
| node to be added |
| @exception IllegalArgumentException if adding a node to a leaf node, |
| or if the node to be added is not a leaf]]> |
| </doc> |
| </method> |
| <method name="remove" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="node" type="org.apache.hadoop.net.Node"/> |
| <doc> |
| <![CDATA[Remove a node |
| Update node counter & rack counter if necessary |
| @param node |
| node to be removed]]> |
| </doc> |
| </method> |
| <method name="contains" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="node" type="org.apache.hadoop.net.Node"/> |
| <doc> |
| <![CDATA[Check if the tree contains node <i>node</i> |
| |
| @param node |
| a node |
| @return true if <i>node</i> is already in the tree; false otherwise]]> |
| </doc> |
| </method> |
| <method name="getNode" return="org.apache.hadoop.net.Node" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="loc" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Given a string representation of a node, return its reference |
| |
| @param loc |
| a path-like string representation of a node |
| @return a reference to the node; null if the node is not in the tree]]> |
| </doc> |
| </method> |
| <method name="getNumOfRacks" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the total number of racks]]> |
| </doc> |
| </method> |
| <method name="getNumOfLeaves" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the total number of nodes]]> |
| </doc> |
| </method> |
| <method name="getDistance" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="node1" type="org.apache.hadoop.net.Node"/> |
| <param name="node2" type="org.apache.hadoop.net.Node"/> |
| <doc> |
| <![CDATA[Return the distance between two nodes |
| It is assumed that the distance from one node to its parent is 1 |
| The distance between two nodes is calculated by summing up their distances |
| to their closest common ancestor. |
| @param node1 one node |
| @param node2 another node |
| @return the distance between node1 and node2 |
| @exception IllegalArgumentException when node1 or node2 do not belong to the cluster]]> |
| </doc> |
| </method> |
| <method name="isOnSameRack" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="node1" type="org.apache.hadoop.net.Node"/> |
| <param name="node2" type="org.apache.hadoop.net.Node"/> |
| <doc> |
| <![CDATA[Check if two nodes are on the same rack |
| @param node1 one node |
| @param node2 another node |
| @return true if node1 and node2 are on the same rack; false otherwise |
| @exception IllegalArgumentException when either node1 or node2 is null, or |
| node1 or node2 do not belong to the cluster]]> |
| </doc> |
| </method> |
| <method name="chooseRandom" return="org.apache.hadoop.net.Node" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="scope" type="java.lang.String"/> |
| <doc> |
| <![CDATA[randomly choose one node from <i>scope</i> |
| if scope starts with ~, choose one from the all nodes except for the |
| ones in <i>scope</i>; otherwise, choose one from <i>scope</i> |
| @param scope range of nodes from which a node will be chosen |
| @return the chosen node]]> |
| </doc> |
| </method> |
| <method name="countNumOfAvailableNodes" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="scope" type="java.lang.String"/> |
| <param name="excludedNodes" type="java.util.List<org.apache.hadoop.net.Node>"/> |
| <doc> |
| <![CDATA[return the number of leaves in <i>scope</i> but not in <i>excludedNodes</i> |
| if scope starts with ~, return the number of nodes that are not |
| in <i>scope</i> and <i>excludedNodes</i>; |
| @param scope a path string that may start with ~ |
| @param excludedNodes a list of nodes |
| @return number of available nodes]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[convert a network tree to a string]]> |
| </doc> |
| </method> |
| <method name="pseudoSortByDistance" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="reader" type="org.apache.hadoop.net.Node"/> |
| <param name="nodes" type="org.apache.hadoop.net.Node[]"/> |
| <doc> |
| <![CDATA[Sort nodes array by their distances to <i>reader</i> |
| It linearly scans the array, if a local node is found, swap it with |
| the first element of the array. |
| If a local rack node is found, swap it with the first element following |
| the local node. |
| If neither local node or local rack node is found, put a random replica |
| location at position 0. |
| It leaves the rest nodes untouched.]]> |
| </doc> |
| </method> |
| <field name="DEFAULT_RACK" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="UNRESOLVED" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DEFAULT_HOST_LEVEL" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[The class represents a cluster of computer with a tree hierarchical |
| network topology. |
| For example, a cluster may consist of many data centers filled |
| with racks of computers. |
| In a network topology, leaves represent data nodes (computers) and inner |
| nodes represent switches/routers that manage traffic in/out of data centers |
| or racks.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.net.NetworkTopology --> |
| <!-- start interface org.apache.hadoop.net.Node --> |
| <interface name="Node" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getNetworkLocation" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the string representation of this node's network location]]> |
| </doc> |
| </method> |
| <method name="setNetworkLocation" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="location" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set the node's network location]]> |
| </doc> |
| </method> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return this node's name]]> |
| </doc> |
| </method> |
| <method name="getParent" return="org.apache.hadoop.net.Node" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return this node's parent]]> |
| </doc> |
| </method> |
| <method name="setParent" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="parent" type="org.apache.hadoop.net.Node"/> |
| <doc> |
| <![CDATA[Set this node's parent]]> |
| </doc> |
| </method> |
| <method name="getLevel" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return this node's level in the tree. |
| E.g. the root of a tree returns 0 and its children return 1]]> |
| </doc> |
| </method> |
| <method name="setLevel" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <doc> |
| <![CDATA[Set this node's level in the tree.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[The interface defines a node in a network topology. |
| A node may be a leaf representing a data node or an inner |
| node representing a datacenter or rack. |
| Each node has a name and its location in the network is |
| decided by a string with syntax similar to a file name. |
| For example, a data node's name is hostname:port# and if it's located at |
| rack "orange" in datacenter "dog", the string representation of its |
| network location is /dog/orange]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.net.Node --> |
| <!-- start class org.apache.hadoop.net.NodeBase --> |
| <class name="NodeBase" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.net.Node"/> |
| <constructor name="NodeBase" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Default constructor]]> |
| </doc> |
| </constructor> |
| <constructor name="NodeBase" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a node from its path |
| @param path |
| a concatenation of this node's location, the path separator, and its name]]> |
| </doc> |
| </constructor> |
| <constructor name="NodeBase" type="java.lang.String, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a node from its name and its location |
| @param name this node's name |
| @param location this node's location]]> |
| </doc> |
| </constructor> |
| <constructor name="NodeBase" type="java.lang.String, java.lang.String, org.apache.hadoop.net.Node, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a node from its name and its location |
| @param name this node's name |
| @param location this node's location |
| @param parent this node's parent node |
| @param level this node's level in the tree]]> |
| </doc> |
| </constructor> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return this node's name]]> |
| </doc> |
| </method> |
| <method name="getNetworkLocation" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return this node's network location]]> |
| </doc> |
| </method> |
| <method name="setNetworkLocation" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="location" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Set this node's network location]]> |
| </doc> |
| </method> |
| <method name="getPath" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="node" type="org.apache.hadoop.net.Node"/> |
| <doc> |
| <![CDATA[Return this node's path]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return this node's string representation]]> |
| </doc> |
| </method> |
| <method name="normalize" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="path" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Normalize a path]]> |
| </doc> |
| </method> |
| <method name="getParent" return="org.apache.hadoop.net.Node" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return this node's parent]]> |
| </doc> |
| </method> |
| <method name="setParent" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="parent" type="org.apache.hadoop.net.Node"/> |
| <doc> |
| <![CDATA[Set this node's parent]]> |
| </doc> |
| </method> |
| <method name="getLevel" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return this node's level in the tree. |
| E.g. the root of a tree returns 0 and its children return 1]]> |
| </doc> |
| </method> |
| <method name="setLevel" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="level" type="int"/> |
| <doc> |
| <![CDATA[Set this node's level in the tree]]> |
| </doc> |
| </method> |
| <field name="PATH_SEPARATOR" type="char" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="PATH_SEPARATOR_STR" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="ROOT" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="name" type="java.lang.String" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="location" type="java.lang.String" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="level" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="parent" type="org.apache.hadoop.net.Node" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[A base class that implements interface Node]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.net.NodeBase --> |
| <!-- start class org.apache.hadoop.net.ScriptBasedMapping --> |
| <class name="ScriptBasedMapping" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.conf.Configurable"/> |
| <implements name="org.apache.hadoop.net.DNSToSwitchMapping"/> |
| <constructor name="ScriptBasedMapping" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="resolve" return="java.util.List<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="names" type="java.util.List<java.lang.String>"/> |
| </method> |
| <doc> |
| <![CDATA[This class implements the {@link DNSToSwitchMapping} interface using a |
| script configured via topology.script.file.name .]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.net.ScriptBasedMapping --> |
| <!-- start class org.apache.hadoop.net.SocketInputStream --> |
| <class name="SocketInputStream" extends="java.io.InputStream" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.nio.channels.ReadableByteChannel"/> |
| <constructor name="SocketInputStream" type="java.nio.channels.ReadableByteChannel, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a new input stream with the given timeout. If the timeout |
| is zero, it will be treated as infinite timeout. The socket's |
| channel will be configured to be non-blocking. |
| |
| @param channel |
| Channel for reading, should also be a {@link SelectableChannel}. |
| The channel will be configured to be non-blocking. |
| @param timeout timeout in milliseconds. must not be negative. |
| @throws IOException]]> |
| </doc> |
| </constructor> |
| <constructor name="SocketInputStream" type="java.net.Socket, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Same as SocketInputStream(socket.getChannel(), timeout): <br><br> |
| |
| Create a new input stream with the given timeout. If the timeout |
| is zero, it will be treated as infinite timeout. The socket's |
| channel will be configured to be non-blocking. |
| |
| @see SocketInputStream#SocketInputStream(ReadableByteChannel, long) |
| |
| @param socket should have a channel associated with it. |
| @param timeout timeout in milliseconds. must not be negative. |
| @throws IOException]]> |
| </doc> |
| </constructor> |
| <constructor name="SocketInputStream" type="java.net.Socket" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Same as SocketInputStream(socket.getChannel(), socket.getSoTimeout()) |
| :<br><br> |
| |
| Create a new input stream with the given timeout. If the timeout |
| is zero, it will be treated as infinite timeout. The socket's |
| channel will be configured to be non-blocking. |
| @see SocketInputStream#SocketInputStream(ReadableByteChannel, long) |
| |
| @param socket should have a channel associated with it. |
| @throws IOException]]> |
| </doc> |
| </constructor> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getChannel" return="java.nio.channels.ReadableByteChannel" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns underlying channel used by inputstream. |
| This is useful in certain cases like channel for |
| {@link FileChannel#transferFrom(ReadableByteChannel, long, long)}.]]> |
| </doc> |
| </method> |
| <method name="isOpen" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="read" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dst" type="java.nio.ByteBuffer"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[This implements an input stream that can have a timeout while reading. |
| This sets non-blocking flag on the socket channel. |
| So after creating this object, read() on |
| {@link Socket#getInputStream()} and write() on |
| {@link Socket#getOutputStream()} for the associated socket will throw |
| IllegalBlockingModeException. |
| Please use {@link SocketOutputStream} for writing.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.net.SocketInputStream --> |
| <!-- start class org.apache.hadoop.net.SocketOutputStream --> |
| <class name="SocketOutputStream" extends="java.io.OutputStream" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.nio.channels.WritableByteChannel"/> |
| <constructor name="SocketOutputStream" type="java.nio.channels.WritableByteChannel, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Create a new output stream with the given timeout. If the timeout |
| is zero, it will be treated as infinite timeout. The socket's |
| channel will be configured to be non-blocking. |
| |
| @param channel |
| Channel for writing, should also be a {@link SelectableChannel}. |
| The channel will be configured to be non-blocking. |
| @param timeout timeout in milliseconds. must not be negative. |
| @throws IOException]]> |
| </doc> |
| </constructor> |
| <constructor name="SocketOutputStream" type="java.net.Socket, long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Same as SocketOutputStream(socket.getChannel(), timeout):<br><br> |
| |
| Create a new output stream with the given timeout. If the timeout |
| is zero, it will be treated as infinite timeout. The socket's |
| channel will be configured to be non-blocking. |
| |
| @see SocketOutputStream#SocketOutputStream(WritableByteChannel, long) |
| |
| @param socket should have a channel associated with it. |
| @param timeout timeout in milliseconds. must not be negative. |
| @throws IOException]]> |
| </doc> |
| </constructor> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte[]"/> |
| <param name="off" type="int"/> |
| <param name="len" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="close" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getChannel" return="java.nio.channels.WritableByteChannel" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns underlying channel used by this stream. |
| This is useful in certain cases like channel for |
| {@link FileChannel#transferTo(long, long, WritableByteChannel)}]]> |
| </doc> |
| </method> |
| <method name="isOpen" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="write" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="java.nio.ByteBuffer"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[This implements an output stream that can have a timeout while writing. |
| This sets non-blocking flag on the socket channel. |
| So after creating this object, read() on |
| {@link Socket#getInputStream()} and write() on |
| {@link Socket#getOutputStream()} on the associated socket will throw |
| IllegalBlockingModeException. |
| Please use {@link SocketInputStream} for reading.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.net.SocketOutputStream --> |
| <!-- start class org.apache.hadoop.net.SocksSocketFactory --> |
| <class name="SocksSocketFactory" extends="javax.net.SocketFactory" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.conf.Configurable"/> |
| <constructor name="SocksSocketFactory" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Default empty constructor (for use with the reflection API).]]> |
| </doc> |
| </constructor> |
| <constructor name="SocksSocketFactory" type="java.net.Proxy" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor with a supplied Proxy |
| |
| @param proxy the proxy to use to create sockets]]> |
| </doc> |
| </constructor> |
| <method name="createSocket" return="java.net.Socket" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createSocket" return="java.net.Socket" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="addr" type="java.net.InetAddress"/> |
| <param name="port" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createSocket" return="java.net.Socket" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="addr" type="java.net.InetAddress"/> |
| <param name="port" type="int"/> |
| <param name="localHostAddr" type="java.net.InetAddress"/> |
| <param name="localPort" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createSocket" return="java.net.Socket" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="host" type="java.lang.String"/> |
| <param name="port" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <exception name="UnknownHostException" type="java.net.UnknownHostException"/> |
| </method> |
| <method name="createSocket" return="java.net.Socket" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="host" type="java.lang.String"/> |
| <param name="port" type="int"/> |
| <param name="localHostAddr" type="java.net.InetAddress"/> |
| <param name="localPort" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <exception name="UnknownHostException" type="java.net.UnknownHostException"/> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="obj" type="java.lang.Object"/> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| </method> |
| <doc> |
| <![CDATA[Specialized SocketFactory to create sockets with a SOCKS proxy]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.net.SocksSocketFactory --> |
| <!-- start class org.apache.hadoop.net.StandardSocketFactory --> |
| <class name="StandardSocketFactory" extends="javax.net.SocketFactory" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="StandardSocketFactory" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Default empty constructor (for use with the reflection API).]]> |
| </doc> |
| </constructor> |
| <method name="createSocket" return="java.net.Socket" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createSocket" return="java.net.Socket" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="addr" type="java.net.InetAddress"/> |
| <param name="port" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createSocket" return="java.net.Socket" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="addr" type="java.net.InetAddress"/> |
| <param name="port" type="int"/> |
| <param name="localHostAddr" type="java.net.InetAddress"/> |
| <param name="localPort" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="createSocket" return="java.net.Socket" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="host" type="java.lang.String"/> |
| <param name="port" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <exception name="UnknownHostException" type="java.net.UnknownHostException"/> |
| </method> |
| <method name="createSocket" return="java.net.Socket" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="host" type="java.lang.String"/> |
| <param name="port" type="int"/> |
| <param name="localHostAddr" type="java.net.InetAddress"/> |
| <param name="localPort" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <exception name="UnknownHostException" type="java.net.UnknownHostException"/> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="obj" type="java.lang.Object"/> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[Specialized SocketFactory to create standard sockets (no proxy)]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.net.StandardSocketFactory --> |
| <doc> |
| <![CDATA[Network-related classes.]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.record"> |
| <!-- start class org.apache.hadoop.record.BinaryRecordInput --> |
| <class name="BinaryRecordInput" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.record.RecordInput"/> |
| <constructor name="BinaryRecordInput" type="java.io.InputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of BinaryRecordInput]]> |
| </doc> |
| </constructor> |
| <constructor name="BinaryRecordInput" type="java.io.DataInput" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of BinaryRecordInput]]> |
| </doc> |
| </constructor> |
| <method name="get" return="org.apache.hadoop.record.BinaryRecordInput" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="inp" type="java.io.DataInput"/> |
| <doc> |
| <![CDATA[Get a thread-local record input for the supplied DataInput. |
| @param inp data input stream |
| @return binary record input corresponding to the supplied DataInput.]]> |
| </doc> |
| </method> |
| <method name="readByte" return="byte" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readBool" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readInt" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readLong" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFloat" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readDouble" return="double" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readBuffer" return="org.apache.hadoop.record.Buffer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startVector" return="org.apache.hadoop.record.Index" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startMap" return="org.apache.hadoop.record.Index" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.record.BinaryRecordInput --> |
| <!-- start class org.apache.hadoop.record.BinaryRecordOutput --> |
| <class name="BinaryRecordOutput" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.record.RecordOutput"/> |
| <constructor name="BinaryRecordOutput" type="java.io.OutputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of BinaryRecordOutput]]> |
| </doc> |
| </constructor> |
| <constructor name="BinaryRecordOutput" type="java.io.DataOutput" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of BinaryRecordOutput]]> |
| </doc> |
| </constructor> |
| <method name="get" return="org.apache.hadoop.record.BinaryRecordOutput" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <doc> |
| <![CDATA[Get a thread-local record output for the supplied DataOutput. |
| @param out data output stream |
| @return binary record output corresponding to the supplied DataOutput.]]> |
| </doc> |
| </method> |
| <method name="writeByte" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeBool" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="boolean"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeInt" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeLong" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="l" type="long"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeFloat" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="float"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeDouble" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="d" type="double"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeString" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="s" type="java.lang.String"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeBuffer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="buf" type="org.apache.hadoop.record.Buffer"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="r" type="org.apache.hadoop.record.Record"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="r" type="org.apache.hadoop.record.Record"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.ArrayList"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.ArrayList"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.TreeMap"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.TreeMap"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.record.BinaryRecordOutput --> |
| <!-- start class org.apache.hadoop.record.Buffer --> |
| <class name="Buffer" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="java.lang.Comparable"/> |
| <implements name="java.lang.Cloneable"/> |
| <constructor name="Buffer" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a zero-count sequence.]]> |
| </doc> |
| </constructor> |
| <constructor name="Buffer" type="byte[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a Buffer using the byte array as the initial value. |
| |
| @param bytes This array becomes the backing storage for the object.]]> |
| </doc> |
| </constructor> |
| <constructor name="Buffer" type="byte[], int, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a Buffer using the byte range as the initial value. |
| |
| @param bytes Copy of this array becomes the backing storage for the object. |
| @param offset offset into byte array |
| @param length length of data]]> |
| </doc> |
| </constructor> |
| <method name="set" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <doc> |
| <![CDATA[Use the specified bytes array as underlying sequence. |
| |
| @param bytes byte sequence]]> |
| </doc> |
| </method> |
| <method name="copy" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <doc> |
| <![CDATA[Copy the specified byte array to the Buffer. Replaces the current buffer. |
| |
| @param bytes byte array to be assigned |
| @param offset offset into byte array |
| @param length length of data]]> |
| </doc> |
| </method> |
| <method name="get" return="byte[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the data from the Buffer. |
| |
| @return The data is only valid between 0 and getCount() - 1.]]> |
| </doc> |
| </method> |
| <method name="getCount" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the current count of the buffer.]]> |
| </doc> |
| </method> |
| <method name="getCapacity" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the capacity, which is the maximum count that could be handled without |
| resizing the backing storage. |
| |
| @return The number of bytes]]> |
| </doc> |
| </method> |
| <method name="setCapacity" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newCapacity" type="int"/> |
| <doc> |
| <![CDATA[Change the capacity of the backing storage. |
| The data is preserved if newCapacity >= getCount(). |
| @param newCapacity The new capacity in bytes.]]> |
| </doc> |
| </method> |
| <method name="reset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Reset the buffer to 0 size]]> |
| </doc> |
| </method> |
| <method name="truncate" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Change the capacity of the backing store to be the same as the current |
| count of buffer.]]> |
| </doc> |
| </method> |
| <method name="append" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="offset" type="int"/> |
| <param name="length" type="int"/> |
| <doc> |
| <![CDATA[Append specified bytes to the buffer. |
| |
| @param bytes byte array to be appended |
| @param offset offset into byte array |
| @param length length of data]]> |
| </doc> |
| </method> |
| <method name="append" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <doc> |
| <![CDATA[Append specified bytes to the buffer |
| |
| @param bytes byte array to be appended]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Define the sort order of the Buffer. |
| |
| @param other The other buffer |
| @return Positive if this is bigger than other, 0 if they are equal, and |
| negative if this is smaller than other.]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="java.lang.Object"/> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="charsetName" type="java.lang.String"/> |
| <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/> |
| <doc> |
      <![CDATA[Convert the byte buffer to a string in a specific character encoding
| |
| @param charsetName Valid Java Character Set Name]]> |
| </doc> |
| </method> |
| <method name="clone" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="CloneNotSupportedException" type="java.lang.CloneNotSupportedException"/> |
| </method> |
| <doc> |
| <![CDATA[A byte sequence that is used as a Java native type for buffer. |
 It is resizable and distinguishes between the count of the sequence and
| the current capacity.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.Buffer --> |
| <!-- start class org.apache.hadoop.record.CsvRecordInput --> |
| <class name="CsvRecordInput" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.record.RecordInput"/> |
| <constructor name="CsvRecordInput" type="java.io.InputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of CsvRecordInput]]> |
| </doc> |
| </constructor> |
| <method name="readByte" return="byte" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readBool" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readInt" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readLong" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFloat" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readDouble" return="double" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readBuffer" return="org.apache.hadoop.record.Buffer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startVector" return="org.apache.hadoop.record.Index" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startMap" return="org.apache.hadoop.record.Index" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.record.CsvRecordInput --> |
| <!-- start class org.apache.hadoop.record.CsvRecordOutput --> |
| <class name="CsvRecordOutput" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.record.RecordOutput"/> |
| <constructor name="CsvRecordOutput" type="java.io.OutputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of CsvRecordOutput]]> |
| </doc> |
| </constructor> |
| <method name="writeByte" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeBool" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="boolean"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeInt" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeLong" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="l" type="long"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeFloat" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="float"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeDouble" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="d" type="double"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeString" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="s" type="java.lang.String"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeBuffer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="buf" type="org.apache.hadoop.record.Buffer"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="r" type="org.apache.hadoop.record.Record"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="r" type="org.apache.hadoop.record.Record"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.ArrayList"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.ArrayList"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.TreeMap"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.TreeMap"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.record.CsvRecordOutput --> |
| <!-- start interface org.apache.hadoop.record.Index --> |
| <interface name="Index" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="done" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="incr" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[Interface that acts as an iterator for deserializing maps. |
| The deserializer returns an instance that the record uses to |
| read vectors and maps. An example of usage is as follows: |
| |
| <code> |
| Index idx = startVector(...); |
| while (!idx.done()) { |
| .... // read element of a vector |
| idx.incr(); |
| } |
| </code>]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.record.Index --> |
| <!-- start class org.apache.hadoop.record.Record --> |
| <class name="Record" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.WritableComparable"/> |
| <implements name="java.lang.Cloneable"/> |
| <constructor name="Record" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="serialize" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="rout" type="org.apache.hadoop.record.RecordOutput"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
      <![CDATA[Serialize a record with a tag (usually field name)
| @param rout Record output destination |
| @param tag record tag (Used only in tagged serialization e.g. XML)]]> |
| </doc> |
| </method> |
| <method name="deserialize" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="rin" type="org.apache.hadoop.record.RecordInput"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Deserialize a record with a tag (usually field name) |
| @param rin Record input source |
| @param tag Record tag (Used only in tagged serialization e.g. XML)]]> |
| </doc> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="peer" type="java.lang.Object"/> |
| <exception name="ClassCastException" type="java.lang.ClassCastException"/> |
| </method> |
| <method name="serialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="rout" type="org.apache.hadoop.record.RecordOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Serialize a record without a tag |
| @param rout Record output destination]]> |
| </doc> |
| </method> |
| <method name="deserialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="rin" type="org.apache.hadoop.record.RecordInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Deserialize a record without a tag |
| @param rin Record input source]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="din" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[Abstract class that is extended by generated classes.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.Record --> |
| <!-- start class org.apache.hadoop.record.RecordComparator --> |
| <class name="RecordComparator" extends="org.apache.hadoop.io.WritableComparator" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="RecordComparator" type="java.lang.Class" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a raw {@link Record} comparison implementation.]]> |
| </doc> |
| </constructor> |
| <method name="compare" return="int" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| </method> |
| <method name="define" |
| abstract="false" native="false" synchronized="true" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class"/> |
| <param name="comparator" type="org.apache.hadoop.record.RecordComparator"/> |
| <doc> |
| <![CDATA[Register an optimized comparator for a {@link Record} implementation. |
| |
 @param c record class for which a raw comparator is provided
| @param comparator Raw comparator instance for class c]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A raw record comparator base class]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.RecordComparator --> |
| <!-- start interface org.apache.hadoop.record.RecordInput --> |
| <interface name="RecordInput" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="readByte" return="byte" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a byte from serialized record. |
| @param tag Used by tagged serialization formats (such as XML) |
| @return value read from serialized record.]]> |
| </doc> |
| </method> |
| <method name="readBool" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a boolean from serialized record. |
| @param tag Used by tagged serialization formats (such as XML) |
| @return value read from serialized record.]]> |
| </doc> |
| </method> |
| <method name="readInt" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read an integer from serialized record. |
| @param tag Used by tagged serialization formats (such as XML) |
| @return value read from serialized record.]]> |
| </doc> |
| </method> |
| <method name="readLong" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a long integer from serialized record. |
| @param tag Used by tagged serialization formats (such as XML) |
| @return value read from serialized record.]]> |
| </doc> |
| </method> |
| <method name="readFloat" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a single-precision float from serialized record. |
| @param tag Used by tagged serialization formats (such as XML) |
| @return value read from serialized record.]]> |
| </doc> |
| </method> |
| <method name="readDouble" return="double" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a double-precision number from serialized record. |
| @param tag Used by tagged serialization formats (such as XML) |
| @return value read from serialized record.]]> |
| </doc> |
| </method> |
| <method name="readString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a UTF-8 encoded string from serialized record. |
| @param tag Used by tagged serialization formats (such as XML) |
| @return value read from serialized record.]]> |
| </doc> |
| </method> |
| <method name="readBuffer" return="org.apache.hadoop.record.Buffer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read byte array from serialized record. |
| @param tag Used by tagged serialization formats (such as XML) |
| @return value read from serialized record.]]> |
| </doc> |
| </method> |
| <method name="startRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Check the mark for start of the serialized record. |
| @param tag Used by tagged serialization formats (such as XML)]]> |
| </doc> |
| </method> |
| <method name="endRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Check the mark for end of the serialized record. |
| @param tag Used by tagged serialization formats (such as XML)]]> |
| </doc> |
| </method> |
| <method name="startVector" return="org.apache.hadoop.record.Index" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Check the mark for start of the serialized vector. |
| @param tag Used by tagged serialization formats (such as XML) |
| @return Index that is used to count the number of elements.]]> |
| </doc> |
| </method> |
| <method name="endVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Check the mark for end of the serialized vector. |
| @param tag Used by tagged serialization formats (such as XML)]]> |
| </doc> |
| </method> |
| <method name="startMap" return="org.apache.hadoop.record.Index" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Check the mark for start of the serialized map. |
| @param tag Used by tagged serialization formats (such as XML) |
| @return Index that is used to count the number of map entries.]]> |
| </doc> |
| </method> |
| <method name="endMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Check the mark for end of the serialized map. |
| @param tag Used by tagged serialization formats (such as XML)]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Interface that all the Deserializers have to implement.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.record.RecordInput --> |
| <!-- start interface org.apache.hadoop.record.RecordOutput --> |
| <interface name="RecordOutput" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="writeByte" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write a byte to serialized record. |
| @param b Byte to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="writeBool" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="boolean"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write a boolean to serialized record. |
| @param b Boolean to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="writeInt" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write an integer to serialized record. |
| @param i Integer to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="writeLong" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="l" type="long"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write a long integer to serialized record. |
| @param l Long to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="writeFloat" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="float"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write a single-precision float to serialized record. |
| @param f Float to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="writeDouble" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="d" type="double"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write a double precision floating point number to serialized record. |
| @param d Double to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="writeString" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="s" type="java.lang.String"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write a unicode string to serialized record. |
| @param s String to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="writeBuffer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="buf" type="org.apache.hadoop.record.Buffer"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Write a buffer to serialized record. |
| @param buf Buffer to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="startRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="r" type="org.apache.hadoop.record.Record"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Mark the start of a record to be serialized. |
| @param r Record to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="endRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="r" type="org.apache.hadoop.record.Record"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Mark the end of a serialized record. |
| @param r Record to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="startVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.ArrayList"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Mark the start of a vector to be serialized. |
| @param v Vector to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="endVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.ArrayList"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Mark the end of a serialized vector. |
| @param v Vector to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="startMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="m" type="java.util.TreeMap"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Mark the start of a map to be serialized. |
| @param m Map to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <method name="endMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="m" type="java.util.TreeMap"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Mark the end of a serialized map. |
| @param m Map to be serialized |
| @param tag Used by tagged serialization formats (such as XML) |
| @throws IOException Indicates error in serialization]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Interface that all the serializers have to implement.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.record.RecordOutput --> |
| <!-- start class org.apache.hadoop.record.Utils --> |
| <class name="Utils" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="readFloat" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="start" type="int"/> |
| <doc> |
| <![CDATA[Parse a float from a byte array.]]> |
| </doc> |
| </method> |
| <method name="readDouble" return="double" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="start" type="int"/> |
| <doc> |
| <![CDATA[Parse a double from a byte array.]]> |
| </doc> |
| </method> |
| <method name="readVLong" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="start" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reads a zero-compressed encoded long from a byte array and returns it. |
| @param bytes byte array with the encoded long |
| @param start starting index |
| @throws java.io.IOException |
| @return deserialized long]]> |
| </doc> |
| </method> |
| <method name="readVInt" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <param name="start" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reads a zero-compressed encoded integer from a byte array and returns it. |
| @param bytes byte array with the encoded integer |
| @param start start index |
| @throws java.io.IOException |
| @return deserialized integer]]> |
| </doc> |
| </method> |
| <method name="readVLong" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reads a zero-compressed encoded long from a stream and return it. |
| @param in input stream |
| @throws java.io.IOException |
| @return deserialized long]]> |
| </doc> |
| </method> |
| <method name="readVInt" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Reads a zero-compressed encoded integer from a stream and returns it. |
| @param in input stream |
| @throws java.io.IOException |
| @return deserialized integer]]> |
| </doc> |
| </method> |
| <method name="getVIntSize" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="long"/> |
| <doc> |
| <![CDATA[Get the encoded length if an integer is stored in a variable-length format |
| @return the encoded length]]> |
| </doc> |
| </method> |
| <method name="writeVLong" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="java.io.DataOutput"/> |
| <param name="i" type="long"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Serializes a long to a binary stream with zero-compressed encoding. |
| For -112 <= i <= 127, only one byte is used with the actual value. |
| For other values of i, the first byte value indicates whether the |
| long is positive or negative, and the number of bytes that follow. |
| If the first byte value v is between -113 and -120, the following long |
| is positive, with number of bytes that follow are -(v+112). |
| If the first byte value v is between -121 and -128, the following long |
| is negative, with number of bytes that follow are -(v+120). Bytes are |
| stored in the high-non-zero-byte-first order. |
| |
| @param stream Binary output stream |
| @param i Long to be serialized |
| @throws java.io.IOException]]> |
| </doc> |
| </method> |
| <method name="writeVInt" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="java.io.DataOutput"/> |
| <param name="i" type="int"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Serializes an int to a binary stream with zero-compressed encoding. |
| |
| @param stream Binary output stream |
| @param i int to be serialized |
| @throws java.io.IOException]]> |
| </doc> |
| </method> |
| <method name="compareBytes" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| <doc> |
| <![CDATA[Lexicographic order of binary data.]]> |
| </doc> |
| </method> |
| <field name="hexchars" type="char[]" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Various utility functions for Hadoop record I/O runtime.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.Utils --> |
| <!-- start class org.apache.hadoop.record.XmlRecordInput --> |
| <class name="XmlRecordInput" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.record.RecordInput"/> |
| <constructor name="XmlRecordInput" type="java.io.InputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of XmlRecordInput]]> |
| </doc> |
| </constructor> |
| <method name="readByte" return="byte" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readBool" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readInt" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readLong" return="long" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readFloat" return="float" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readDouble" return="double" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="readBuffer" return="org.apache.hadoop.record.Buffer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startVector" return="org.apache.hadoop.record.Index" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startMap" return="org.apache.hadoop.record.Index" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[XML Deserializer.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.XmlRecordInput --> |
| <!-- start class org.apache.hadoop.record.XmlRecordOutput --> |
| <class name="XmlRecordOutput" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.record.RecordOutput"/> |
| <constructor name="XmlRecordOutput" type="java.io.OutputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of XmlRecordOutput]]> |
| </doc> |
| </constructor> |
| <method name="writeByte" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="byte"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeBool" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b" type="boolean"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeInt" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeLong" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="l" type="long"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeFloat" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="f" type="float"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeDouble" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="d" type="double"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeString" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="s" type="java.lang.String"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="writeBuffer" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="buf" type="org.apache.hadoop.record.Buffer"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="r" type="org.apache.hadoop.record.Record"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="r" type="org.apache.hadoop.record.Record"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.ArrayList"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.ArrayList"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="startMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.TreeMap"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="endMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="v" type="java.util.TreeMap"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[XML Serializer.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.XmlRecordOutput --> |
| <doc> |
| <![CDATA[Hadoop record I/O contains classes and a record description language |
| translator for simplifying serialization and deserialization of records in a |
| language-neutral manner. |
| |
| <h2>Introduction</h2> |
| |
| Software systems of any significant complexity require mechanisms for data |
| interchange with the outside world. These interchanges typically involve the |
| marshaling and unmarshaling of logical units of data to and from data streams |
| (files, network connections, memory buffers etc.). Applications usually have |
| some code for serializing and deserializing the data types that they manipulate |
| embedded in them. The work of serialization has several features that make |
| automatic code generation for it worthwhile. Given a particular output encoding |
| (binary, XML, etc.), serialization of primitive types and simple compositions |
| of primitives (structs, vectors etc.) is a very mechanical task. Manually |
| written serialization code can be susceptible to bugs especially when records |
| have a large number of fields or a record definition changes between software |
| versions. Lastly, it can be very useful for applications written in different |
| programming languages to be able to share and interchange data. This can be |
| made a lot easier by describing the data records manipulated by these |
| applications in a language agnostic manner and using the descriptions to derive |
| implementations of serialization in multiple target languages. |
| |
| This document describes Hadoop Record I/O, a mechanism that is aimed |
| at |
| <ul> |
| <li> enabling the specification of simple serializable data types (records) |
| <li> enabling the generation of code in multiple target languages for |
| marshaling and unmarshaling such types |
| <li> providing target language specific support that will enable application |
| programmers to incorporate generated code into their applications |
| </ul> |
| |
| The goals of Hadoop Record I/O are similar to those of mechanisms such as XDR, |
| ASN.1, PADS and ICE. While these systems all include a DDL that enables |
| the specification of most record types, they differ widely in what else they |
| focus on. The focus in Hadoop Record I/O is on data marshaling and |
| multi-lingual support. We take a translator-based approach to serialization. |
| Hadoop users have to describe their data in a simple data description |
| language. The Hadoop DDL translator rcc generates code that users |
| can invoke in order to read/write their data from/to simple stream |
| abstractions. Next we list explicitly some of the goals and non-goals of |
| Hadoop Record I/O. |
| |
| |
| <h3>Goals</h3> |
| |
| <ul> |
| <li> Support for commonly used primitive types. Hadoop should include as |
| primitives commonly used builtin types from programming languages we intend to |
| support. |
| |
| <li> Support for common data compositions (including recursive compositions). |
| Hadoop should support widely used composite types such as structs and |
| vectors. |
| |
| <li> Code generation in multiple target languages. Hadoop should be capable of |
| generating serialization code in multiple target languages and should be |
| easily extensible to new target languages. The initial target languages are |
| C++ and Java. |
| |
| <li> Support for generated target languages. Hadoop should include support |
| in the form of headers, libraries, packages for supported target languages |
| that enable easy inclusion and use of generated code in applications. |
| |
| <li> Support for multiple output encodings. Candidates include |
| packed binary, comma-separated text, XML etc. |
| |
| <li> Support for specifying record types in a backwards/forwards compatible |
| manner. This will probably be in the form of support for optional fields in |
| records. This version of the document does not include a description of the |
| planned mechanism, we intend to include it in the next iteration. |
| |
| </ul> |
| |
| <h3>Non-Goals</h3> |
| |
| <ul> |
| <li> Serializing existing arbitrary C++ classes. |
| <li> Serializing complex data structures such as trees, linked lists etc. |
| <li> Built-in indexing schemes, compression, or check-sums. |
| <li> Dynamic construction of objects from an XML schema. |
| </ul> |
| |
| The remainder of this document describes the features of Hadoop record I/O |
| in more detail. Section 2 describes the data types supported by the system. |
| Section 3 lays out the DDL syntax with some examples of simple records. |
| Section 4 describes the process of code generation with rcc. Section 5 |
| describes target language mappings and support for Hadoop types. We include a |
| fairly complete description of C++ mappings with intent to include Java and |
| others in upcoming iterations of this document. The last section talks about |
| supported output encodings. |
| |
| |
| <h2>Data Types and Streams</h2> |
| |
| This section describes the primitive and composite types supported by Hadoop. |
| We aim to support a set of types that can be used to simply and efficiently |
| express a wide range of record types in different programming languages. |
| |
| <h3>Primitive Types</h3> |
| |
| For the most part, the primitive types of Hadoop map directly to primitive |
| types in high level programming languages. Special cases are the |
| ustring (a Unicode string) and buffer types, which we believe |
| find wide use and which are usually implemented in library code and not |
| available as language built-ins. Hadoop also supplies these via library code |
| when a target language built-in is not present and there is no widely |
| adopted "standard" implementation. The complete list of primitive types is: |
| |
| <ul> |
| <li> byte: An 8-bit unsigned integer. |
| <li> boolean: A boolean value. |
| <li> int: A 32-bit signed integer. |
| <li> long: A 64-bit signed integer. |
| <li> float: A single precision floating point number as described by |
| IEEE-754. |
| <li> double: A double precision floating point number as described by |
| IEEE-754. |
| <li> ustring: A string consisting of Unicode characters. |
| <li> buffer: An arbitrary sequence of bytes. |
| </ul> |
| |
| |
| <h3>Composite Types</h3> |
| Hadoop supports a small set of composite types that enable the description |
| of simple aggregate types and containers. A composite type is serialized |
| by sequentially serializing its constituent elements. The supported |
| composite types are: |
| |
| <ul> |
| |
| <li> record: An aggregate type like a C-struct. This is a list of |
| typed fields that are together considered a single unit of data. A record |
| is serialized by sequentially serializing its constituent fields. In addition |
| to serialization a record has comparison operations (equality and less-than) |
| implemented for it; these are defined as memberwise comparisons. |
| |
| <li>vector: A sequence of entries of the same data type, primitive |
| or composite. |
| |
| <li> map: An associative container mapping instances of a key type to |
| instances of a value type. The key and value types may themselves be primitive |
| or composite types. |
| |
| </ul> |
| |
| <h3>Streams</h3> |
| |
| Hadoop generates code for serializing and deserializing record types to |
| abstract streams. For each target language Hadoop defines very simple input |
| and output stream interfaces. Application writers can usually develop |
| concrete implementations of these by putting a one method wrapper around |
| an existing stream implementation. |
| |
| |
| <h2>DDL Syntax and Examples</h2> |
| |
| We now describe the syntax of the Hadoop data description language. This is |
| followed by a few examples of DDL usage. |
| |
| <h3>Hadoop DDL Syntax</h3> |
| |
| <pre><code> |
| recfile = *include module *record |
| include = "include" path |
| path = (relative-path / absolute-path) |
| module = "module" module-name |
| module-name = name *("." name) |
| record := "class" name "{" 1*(field) "}" |
| field := type name ";" |
| name := ALPHA (ALPHA / DIGIT / "_" )* |
| type := (ptype / ctype) |
| ptype := ("byte" / "boolean" / "int" / |
| "long" / "float" / "double" / |
| "ustring" / "buffer") |
| ctype := (("vector" "<" type ">") / |
| ("map" "<" type "," type ">") / name) |
| </code></pre> |
| |
| A DDL file describes one or more record types. It begins with zero or |
| more include declarations, a single mandatory module declaration |
| followed by zero or more class declarations. The semantics of each of |
| these declarations are described below: |
| |
| <ul> |
| |
| <li>include: An include declaration specifies a DDL file to be |
| referenced when generating code for types in the current DDL file. Record types |
| in the current compilation unit may refer to types in all included files. |
| File inclusion is recursive. An include does not trigger code |
| generation for the referenced file. |
| |
| <li> module: Every Hadoop DDL file must have a single module |
| declaration that follows the list of includes and precedes all record |
| declarations. A module declaration identifies a scope within which |
| the names of all types in the current file are visible. Module names are |
| mapped to C++ namespaces, Java packages etc. in generated code. |
| |
| <li> class: Record types are specified through class |
| declarations. A class declaration is like a Java class declaration. |
| It specifies a named record type and a list of fields that constitute records |
| of the type. Usage is illustrated in the following examples. |
| |
| </ul> |
| |
| <h3>Examples</h3> |
| |
| <ul> |
| <li>A simple DDL file links.jr with just one record declaration. |
| <pre><code> |
| module links { |
| class Link { |
| ustring URL; |
| boolean isRelative; |
| ustring anchorText; |
| }; |
| } |
| </code></pre> |
| |
| <li> A DDL file outlinks.jr which includes another |
| <pre><code> |
| include "links.jr" |
| |
| module outlinks { |
| class OutLinks { |
| ustring baseURL; |
| vector<links.Link> outLinks; |
| }; |
| } |
| </code></pre> |
| </ul> |
| |
| <h2>Code Generation</h2> |
| |
| The Hadoop translator is written in Java. Invocation is done by executing a |
| wrapper shell script named rcc. It takes a list of |
| record description files as a mandatory argument and an |
| optional language argument (the default is Java) --language or |
| -l. Thus a typical invocation would look like: |
| <pre><code> |
| $ rcc -l C++ <filename> ... |
| </code></pre> |
| |
| |
| <h2>Target Language Mappings and Support</h2> |
| |
| For all target languages, the unit of code generation is a record type. |
| For each record type, Hadoop generates code for serialization and |
| deserialization, record comparison and access to record members. |
| |
| <h3>C++</h3> |
| |
| Support for including Hadoop generated C++ code in applications comes in the |
| form of a header file recordio.hh which needs to be included in source |
| that uses Hadoop types and a library librecordio.a which applications need |
| to be linked with. The header declares the Hadoop C++ namespace which defines |
| appropriate types for the various primitives, the basic interfaces for |
| records and streams and enumerates the supported serialization encodings. |
| Declarations of these interfaces and a description of their semantics follow: |
| |
| <pre><code> |
| namespace hadoop { |
| |
| enum RecFormat { kBinary, kXML, kCSV }; |
| |
| class InStream { |
| public: |
| virtual ssize_t read(void *buf, size_t n) = 0; |
| }; |
| |
| class OutStream { |
| public: |
| virtual ssize_t write(const void *buf, size_t n) = 0; |
| }; |
| |
| class IOError : public runtime_error { |
| public: |
| explicit IOError(const std::string& msg); |
| }; |
| |
| class IArchive; |
| class OArchive; |
| |
| class RecordReader { |
| public: |
| RecordReader(InStream& in, RecFormat fmt); |
| virtual ~RecordReader(void); |
| |
| virtual void read(Record& rec); |
| }; |
| |
| class RecordWriter { |
| public: |
| RecordWriter(OutStream& out, RecFormat fmt); |
| virtual ~RecordWriter(void); |
| |
| virtual void write(Record& rec); |
| }; |
| |
| |
| class Record { |
| public: |
| virtual std::string type(void) const = 0; |
| virtual std::string signature(void) const = 0; |
| protected: |
| virtual bool validate(void) const = 0; |
| |
| virtual void |
| serialize(OArchive& oa, const std::string& tag) const = 0; |
| |
| virtual void |
| deserialize(IArchive& ia, const std::string& tag) = 0; |
| }; |
| } |
| </code></pre> |
| |
| <ul> |
| |
| <li> RecFormat: An enumeration of the serialization encodings supported |
| by this implementation of Hadoop. |
| |
| <li> InStream: A simple abstraction for an input stream. This has a |
| single public read method that reads n bytes from the stream into |
| the buffer buf. Has the same semantics as a blocking read system |
| call. Returns the number of bytes read or -1 if an error occurs. |
| |
| <li> OutStream: A simple abstraction for an output stream. This has a |
| single write method that writes n bytes to the stream from the |
| buffer buf. Has the same semantics as a blocking write system |
| call. Returns the number of bytes written or -1 if an error occurs. |
| |
| <li> RecordReader: A RecordReader reads records one at a time from |
| an underlying stream in a specified record format. The reader is instantiated |
| with a stream and a serialization format. It has a read method that |
| takes an instance of a record and deserializes the record from the stream. |
| |
| <li> RecordWriter: A RecordWriter writes records one at a |
| time to an underlying stream in a specified record format. The writer is |
| instantiated with a stream and a serialization format. It has a |
| write method that takes an instance of a record and serializes the |
| record to the stream. |
| |
| <li> Record: The base class for all generated record types. This has two |
| public methods type and signature that return the typename and the |
| type signature of the record. |
| |
| </ul> |
| |
| Two files are generated for each record file (note: not for each record). If a |
| record file is named "name.jr", the generated files are |
| "name.jr.cc" and "name.jr.hh" containing serialization |
| implementations and record type declarations respectively. |
| |
| For each record in the DDL file, the generated header file will contain a |
class definition corresponding to the record type; method definitions for the
generated type will be present in the '.cc' file. The generated class will
inherit from the abstract class hadoop::Record. The DDL file's
| module declaration determines the namespace the record belongs to. |
| Each '.' delimited token in the module declaration results in the |
| creation of a namespace. For instance, the declaration module docs.links |
| results in the creation of a docs namespace and a nested |
| docs::links namespace. In the preceding examples, the Link class |
| is placed in the links namespace. The header file corresponding to |
| the links.jr file will contain: |
| |
| <pre><code> |
| namespace links { |
| class Link : public hadoop::Record { |
| // .... |
| }; |
| }; |
| </code></pre> |
| |
| Each field within the record will cause the generation of a private member |
| declaration of the appropriate type in the class declaration, and one or more |
accessor methods. The generated class will implement the serialize and
deserialize methods defined in hadoop::Record. It will also
| implement the inspection methods type and signature from |
| hadoop::Record. A default constructor and virtual destructor will also |
| be generated. Serialization code will read/write records into streams that |
| implement the hadoop::InStream and the hadoop::OutStream interfaces. |
| |
| For each member of a record an accessor method is generated that returns |
| either the member or a reference to the member. For members that are returned |
| by value, a setter method is also generated. This is true for primitive |
| data members of the types byte, int, long, boolean, float and |
double. For example, for an int field called MyField the following
| code is generated. |
| |
| <pre><code> |
| ... |
| private: |
| int32_t mMyField; |
| ... |
| public: |
| int32_t getMyField(void) const { |
| return mMyField; |
| }; |
| |
| void setMyField(int32_t m) { |
| mMyField = m; |
| }; |
| ... |
| </code></pre> |
| |
For a ustring, buffer, or composite field, the generated code
only contains accessors that return a reference to the field. A const
and a non-const accessor are generated. For example:
| |
| <pre><code> |
| ... |
| private: |
| std::string mMyBuf; |
| ... |
| public: |
| |
| std::string& getMyBuf() { |
| return mMyBuf; |
| }; |
| |
| const std::string& getMyBuf() const { |
| return mMyBuf; |
| }; |
| ... |
| </code></pre> |
| |
| <h4>Examples</h4> |
| |
| Suppose the inclrec.jr file contains: |
| <pre><code> |
| module inclrec { |
| class RI { |
| int I32; |
| double D; |
| ustring S; |
| }; |
| } |
| </code></pre> |
| |
| and the testrec.jr file contains: |
| |
| <pre><code> |
| include "inclrec.jr" |
| module testrec { |
| class R { |
| vector<float> VF; |
| RI Rec; |
| buffer Buf; |
| }; |
| } |
| </code></pre> |
| |
| Then the invocation of rcc such as: |
| <pre><code> |
| $ rcc -l c++ inclrec.jr testrec.jr |
| </code></pre> |
| will result in generation of four files: |
| inclrec.jr.{cc,hh} and testrec.jr.{cc,hh}. |
| |
| The inclrec.jr.hh will contain: |
| |
| <pre><code> |
| #ifndef _INCLREC_JR_HH_ |
| #define _INCLREC_JR_HH_ |
| |
| #include "recordio.hh" |
| |
| namespace inclrec { |
| |
| class RI : public hadoop::Record { |
| |
| private: |
| |
| int32_t I32; |
| double D; |
| std::string S; |
| |
| public: |
| |
| RI(void); |
| virtual ~RI(void); |
| |
| virtual bool operator==(const RI& peer) const; |
| virtual bool operator<(const RI& peer) const; |
| |
| virtual int32_t getI32(void) const { return I32; } |
| virtual void setI32(int32_t v) { I32 = v; } |
| |
| virtual double getD(void) const { return D; } |
| virtual void setD(double v) { D = v; } |
| |
virtual std::string& getS(void) { return S; }
virtual const std::string& getS(void) const { return S; }
| |
| virtual std::string type(void) const; |
| virtual std::string signature(void) const; |
| |
| protected: |
| |
| virtual void serialize(hadoop::OArchive& a) const; |
| virtual void deserialize(hadoop::IArchive& a); |
| }; |
| } // end namespace inclrec |
| |
| #endif /* _INCLREC_JR_HH_ */ |
| |
| </code></pre> |
| |
| The testrec.jr.hh file will contain: |
| |
| |
| <pre><code> |
| |
| #ifndef _TESTREC_JR_HH_ |
| #define _TESTREC_JR_HH_ |
| |
| #include "inclrec.jr.hh" |
| |
| namespace testrec { |
| class R : public hadoop::Record { |
| |
| private: |
| |
| std::vector<float> VF; |
| inclrec::RI Rec; |
| std::string Buf; |
| |
| public: |
| |
| R(void); |
| virtual ~R(void); |
| |
| virtual bool operator==(const R& peer) const; |
| virtual bool operator<(const R& peer) const; |
| |
virtual std::vector<float>& getVF(void);
virtual const std::vector<float>& getVF(void) const;

virtual std::string& getBuf(void);
virtual const std::string& getBuf(void) const;

virtual inclrec::RI& getRec(void);
virtual const inclrec::RI& getRec(void) const;
| |
virtual void serialize(hadoop::OArchive& a) const;
virtual void deserialize(hadoop::IArchive& a);
| |
| virtual std::string type(void) const; |
| virtual std::string signature(void) const; |
| }; |
| }; // end namespace testrec |
| #endif /* _TESTREC_JR_HH_ */ |
| |
| </code></pre> |
| |
| <h3>Java</h3> |
| |
| Code generation for Java is similar to that for C++. A Java class is generated |
| for each record type with private members corresponding to the fields. Getters |
| and setters for fields are also generated. Some differences arise in the |
| way comparison is expressed and in the mapping of modules to packages and |
| classes to files. For equality testing, an equals method is generated |
| for each record type. As per Java requirements a hashCode method is also |
| generated. For comparison a compareTo method is generated for each |
| record type. This has the semantics as defined by the Java Comparable |
| interface, that is, the method returns a negative integer, zero, or a positive |
| integer as the invoked object is less than, equal to, or greater than the |
| comparison parameter. |
| |
| A .java file is generated per record type as opposed to per DDL |
| file as in C++. The module declaration translates to a Java |
| package declaration. The module name maps to an identical Java package |
| name. In addition to this mapping, the DDL compiler creates the appropriate |
| directory hierarchy for the package and places the generated .java |
| files in the correct directories. |
| |
| <h2>Mapping Summary</h2> |
| |
| <pre><code> |
| DDL Type C++ Type Java Type |
| |
| boolean bool boolean |
| byte int8_t byte |
| int int32_t int |
| long int64_t long |
| float float float |
| double double double |
| ustring std::string java.lang.String |
| buffer std::string org.apache.hadoop.record.Buffer |
| class type class type class type |
| vector<type> std::vector<type> java.util.ArrayList<type> |
| map<type,type> std::map<type,type> java.util.TreeMap<type,type> |
| </code></pre> |
| |
| <h2>Data encodings</h2> |
| |
| This section describes the format of the data encodings supported by Hadoop. |
| Currently, three data encodings are supported, namely binary, CSV and XML. |
| |
| <h3>Binary Serialization Format</h3> |
| |
| The binary data encoding format is fairly dense. Serialization of composite |
| types is simply defined as a concatenation of serializations of the constituent |
| elements (lengths are included in vectors and maps). |
| |
| Composite types are serialized as follows: |
| <ul> |
| <li> class: Sequence of serialized members. |
| <li> vector: The number of elements serialized as an int. Followed by a |
| sequence of serialized elements. |
| <li> map: The number of key value pairs serialized as an int. Followed |
| by a sequence of serialized (key,value) pairs. |
| </ul> |
| |
| Serialization of primitives is more interesting, with a zero compression |
| optimization for integral types and normalization to UTF-8 for strings. |
| Primitive types are serialized as follows: |
| |
| <ul> |
| <li> byte: Represented by 1 byte, as is. |
<li> boolean: Represented by 1 byte (0 or 1).
| <li> int/long: Integers and longs are serialized zero compressed. |
| Represented as 1-byte if -120 <= value < 128. Otherwise, serialized as a |
| sequence of 2-5 bytes for ints, 2-9 bytes for longs. The first byte represents |
| the number of trailing bytes, N, as the negative number (-120-N). For example, |
| the number 1024 (0x400) is represented by the byte sequence 'x86 x04 x00'. |
| This doesn't help much for 4-byte integers but does a reasonably good job with |
| longs without bit twiddling. |
| <li> float/double: Serialized in IEEE 754 single and double precision |
| format in network byte order. This is the format used by Java. |
| <li> ustring: Serialized as 4-byte zero compressed length followed by |
| data encoded as UTF-8. Strings are normalized to UTF-8 regardless of native |
| language representation. |
| <li> buffer: Serialized as a 4-byte zero compressed length followed by the |
| raw bytes in the buffer. |
| </ul> |
| |
| |
| <h3>CSV Serialization Format</h3> |
| |
| The CSV serialization format has a lot more structure than the "standard" |
| Excel CSV format, but we believe the additional structure is useful because |
| |
| <ul> |
| <li> it makes parsing a lot easier without detracting too much from legibility |
| <li> the delimiters around composites make it obvious when one is reading a |
| sequence of Hadoop records |
| </ul> |
| |
| Serialization formats for the various types are detailed in the grammar that |
| follows. The notable feature of the formats is the use of delimiters for |
indicating certain field types.
| |
| <ul> |
| <li> A string field begins with a single quote ('). |
| <li> A buffer field begins with a sharp (#). |
| <li> A class, vector or map begins with 's{', 'v{' or 'm{' respectively and |
| ends with '}'. |
| </ul> |
| |
| The CSV format can be described by the following grammar: |
| |
| <pre><code> |
| record = primitive / struct / vector / map |
| primitive = boolean / int / long / float / double / ustring / buffer |
| |
| boolean = "T" / "F" |
| int = ["-"] 1*DIGIT |
| long = ";" ["-"] 1*DIGIT |
| float = ["-"] 1*DIGIT "." 1*DIGIT ["E" / "e" ["-"] 1*DIGIT] |
| double = ";" ["-"] 1*DIGIT "." 1*DIGIT ["E" / "e" ["-"] 1*DIGIT] |
| |
| ustring = "'" *(UTF8 char except NULL, LF, % and , / "%00" / "%0a" / "%25" / "%2c" ) |
| |
| buffer = "#" *(BYTE except NULL, LF, % and , / "%00" / "%0a" / "%25" / "%2c" ) |
| |
| struct = "s{" record *("," record) "}" |
| vector = "v{" [record *("," record)] "}" |
| map = "m{" [*(record "," record)] "}" |
| </code></pre> |
| |
| <h3>XML Serialization Format</h3> |
| |
| The XML serialization format is the same used by Apache XML-RPC |
| (http://ws.apache.org/xmlrpc/types.html). This is an extension of the original |
XML-RPC format and adds some additional data types. Not all record I/O types
are directly expressible in this format, and access to a DDL is required in
| order to convert these to valid types. All types primitive or composite are |
| represented by <value> elements. The particular XML-RPC type is |
| indicated by a nested element in the <value> element. The encoding for |
| records is always UTF-8. Primitive types are serialized as follows: |
| |
| <ul> |
| <li> byte: XML tag <ex:i1>. Values: 1-byte unsigned |
| integers represented in US-ASCII |
| <li> boolean: XML tag <boolean>. Values: "0" or "1" |
| <li> int: XML tags <i4> or <int>. Values: 4-byte |
| signed integers represented in US-ASCII. |
| <li> long: XML tag <ex:i8>. Values: 8-byte signed integers |
| represented in US-ASCII. |
| <li> float: XML tag <ex:float>. Values: Single precision |
| floating point numbers represented in US-ASCII. |
| <li> double: XML tag <double>. Values: Double precision |
| floating point numbers represented in US-ASCII. |
<li> ustring: XML tag <string>. Values: String values
| represented as UTF-8. XML does not permit all Unicode characters in literal |
| data. In particular, NULLs and control chars are not allowed. Additionally, |
| XML processors are required to replace carriage returns with line feeds and to |
| replace CRLF sequences with line feeds. Programming languages that we work |
| with do not impose these restrictions on string types. To work around these |
| restrictions, disallowed characters and CRs are percent escaped in strings. |
| The '%' character is also percent escaped. |
<li> buffer: XML tag <string>. Values: Arbitrary binary
| data. Represented as hexBinary, each byte is replaced by its 2-byte |
| hexadecimal representation. |
| </ul> |
| |
| Composite types are serialized as follows: |
| |
| <ul> |
| <li> class: XML tag <struct>. A struct is a sequence of |
| <member> elements. Each <member> element has a <name> |
| element and a <value> element. The <name> is a string that must |
| match /[a-zA-Z][a-zA-Z0-9_]*/. The value of the member is represented |
| by a <value> element. |
| |
<li> vector: XML tag <array>. An <array> contains a
| single <data> element. The <data> element is a sequence of |
| <value> elements each of which represents an element of the vector. |
| |
| <li> map: XML tag <array>. Same as vector. |
| |
| </ul> |
| |
| For example: |
| |
| <pre><code> |
| class { |
| int MY_INT; // value 5 |
| vector<float> MY_VEC; // values 0.1, -0.89, 2.45e4 |
| buffer MY_BUF; // value '\00\n\tabc%' |
| } |
| </code></pre> |
| |
| is serialized as |
| |
| <pre><code class="XML"> |
| <value> |
| <struct> |
| <member> |
| <name>MY_INT</name> |
| <value><i4>5</i4></value> |
| </member> |
| <member> |
| <name>MY_VEC</name> |
| <value> |
| <array> |
| <data> |
| <value><ex:float>0.1</ex:float></value> |
| <value><ex:float>-0.89</ex:float></value> |
| <value><ex:float>2.45e4</ex:float></value> |
| </data> |
| </array> |
| </value> |
| </member> |
| <member> |
| <name>MY_BUF</name> |
| <value><string>%00\n\tabc%25</string></value> |
| </member> |
| </struct> |
| </value> |
| </code></pre>]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.record.compiler"> |
| <!-- start class org.apache.hadoop.record.compiler.CodeBuffer --> |
| <class name="CodeBuffer" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A wrapper around StringBuffer that automatically does indentation]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.CodeBuffer --> |
| <!-- start class org.apache.hadoop.record.compiler.Consts --> |
| <class name="Consts" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <field name="RIO_PREFIX" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="RTI_VAR" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="RTI_FILTER" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="RTI_FILTER_FIELDS" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="RECORD_OUTPUT" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="RECORD_INPUT" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="TAG" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[const definitions for Record I/O compiler]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.Consts --> |
| <!-- start class org.apache.hadoop.record.compiler.JBoolean --> |
| <class name="JBoolean" extends="org.apache.hadoop.record.compiler.JType" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JBoolean" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of JBoolean]]> |
| </doc> |
| </constructor> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JBoolean --> |
| <!-- start class org.apache.hadoop.record.compiler.JBuffer --> |
| <class name="JBuffer" extends="org.apache.hadoop.record.compiler.JCompType" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JBuffer" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of JBuffer]]> |
| </doc> |
| </constructor> |
| <doc> |
| <![CDATA[Code generator for "buffer" type.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JBuffer --> |
| <!-- start class org.apache.hadoop.record.compiler.JByte --> |
| <class name="JByte" extends="org.apache.hadoop.record.compiler.JType" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JByte" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[Code generator for "byte" type.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JByte --> |
| <!-- start class org.apache.hadoop.record.compiler.JDouble --> |
| <class name="JDouble" extends="org.apache.hadoop.record.compiler.JType" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JDouble" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of JDouble]]> |
| </doc> |
| </constructor> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JDouble --> |
| <!-- start class org.apache.hadoop.record.compiler.JField --> |
| <class name="JField" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JField" type="java.lang.String, T" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of JField]]> |
| </doc> |
| </constructor> |
| <doc> |
<![CDATA[A thin wrapper around record field.]]>
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JField --> |
| <!-- start class org.apache.hadoop.record.compiler.JFile --> |
| <class name="JFile" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JFile" type="java.lang.String, java.util.ArrayList<org.apache.hadoop.record.compiler.JFile>, java.util.ArrayList<org.apache.hadoop.record.compiler.JRecord>" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of JFile |
| |
| @param name possibly full pathname to the file |
| @param inclFiles included files (as JFile) |
| @param recList List of records defined within this file]]> |
| </doc> |
| </constructor> |
| <method name="genCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="language" type="java.lang.String"/> |
| <param name="destDir" type="java.lang.String"/> |
| <param name="options" type="java.util.ArrayList<java.lang.String>"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Generate record code in given language. Language should be all |
| lowercase.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Container for the Hadoop Record DDL. |
| The main components of the file are filename, list of included files, |
| and records defined in that file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JFile --> |
| <!-- start class org.apache.hadoop.record.compiler.JFloat --> |
| <class name="JFloat" extends="org.apache.hadoop.record.compiler.JType" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JFloat" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of JFloat]]> |
| </doc> |
| </constructor> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JFloat --> |
| <!-- start class org.apache.hadoop.record.compiler.JInt --> |
| <class name="JInt" extends="org.apache.hadoop.record.compiler.JType" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JInt" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of JInt]]> |
| </doc> |
| </constructor> |
| <doc> |
| <![CDATA[Code generator for "int" type]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JInt --> |
| <!-- start class org.apache.hadoop.record.compiler.JLong --> |
| <class name="JLong" extends="org.apache.hadoop.record.compiler.JType" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JLong" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of JLong]]> |
| </doc> |
| </constructor> |
| <doc> |
| <![CDATA[Code generator for "long" type]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JLong --> |
| <!-- start class org.apache.hadoop.record.compiler.JMap --> |
| <class name="JMap" extends="org.apache.hadoop.record.compiler.JCompType" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JMap" type="org.apache.hadoop.record.compiler.JType, org.apache.hadoop.record.compiler.JType" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of JMap]]> |
| </doc> |
| </constructor> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JMap --> |
| <!-- start class org.apache.hadoop.record.compiler.JRecord --> |
| <class name="JRecord" extends="org.apache.hadoop.record.compiler.JCompType" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JRecord" type="java.lang.String, java.util.ArrayList<org.apache.hadoop.record.compiler.JField<org.apache.hadoop.record.compiler.JType>>" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of JRecord]]> |
| </doc> |
| </constructor> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JRecord --> |
| <!-- start class org.apache.hadoop.record.compiler.JString --> |
| <class name="JString" extends="org.apache.hadoop.record.compiler.JCompType" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JString" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of JString]]> |
| </doc> |
| </constructor> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JString --> |
| <!-- start class org.apache.hadoop.record.compiler.JType --> |
| <class name="JType" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JType" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <doc> |
| <![CDATA[Abstract Base class for all types supported by Hadoop Record I/O.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JType --> |
| <!-- start class org.apache.hadoop.record.compiler.JVector --> |
| <class name="JVector" extends="org.apache.hadoop.record.compiler.JCompType" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="JVector" type="org.apache.hadoop.record.compiler.JType" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of JVector]]> |
| </doc> |
| </constructor> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.JVector --> |
| <doc> |
| <![CDATA[This package contains classes needed for code generation |
| from the hadoop record compiler. CppGenerator and JavaGenerator |
| are the main entry points from the parser. There are classes |
corresponding to every primitive type and compound type
| included in Hadoop record I/O syntax.]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.record.compiler.ant"> |
| <!-- start class org.apache.hadoop.record.compiler.ant.RccTask --> |
| <class name="RccTask" extends="org.apache.tools.ant.Task" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="RccTask" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new instance of RccTask]]> |
| </doc> |
| </constructor> |
| <method name="setLanguage" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="language" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Sets the output language option |
| @param language "java"/"c++"]]> |
| </doc> |
| </method> |
| <method name="setFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="file" type="java.io.File"/> |
| <doc> |
| <![CDATA[Sets the record definition file attribute |
| @param file record definition file]]> |
| </doc> |
| </method> |
| <method name="setFailonerror" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="flag" type="boolean"/> |
| <doc> |
| <![CDATA[Given multiple files (via fileset), set the error handling behavior |
| @param flag true will throw build exception in case of failure (default)]]> |
| </doc> |
| </method> |
| <method name="setDestdir" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dir" type="java.io.File"/> |
| <doc> |
| <![CDATA[Sets directory where output files will be generated |
| @param dir output directory]]> |
| </doc> |
| </method> |
| <method name="addFileset" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="set" type="org.apache.tools.ant.types.FileSet"/> |
| <doc> |
| <![CDATA[Adds a fileset that can consist of one or more files |
| @param set Set of record definition files]]> |
| </doc> |
| </method> |
| <method name="execute" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="BuildException" type="org.apache.tools.ant.BuildException"/> |
| <doc> |
| <![CDATA[Invoke the Hadoop record compiler on each record definition file]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Hadoop record compiler ant Task |
| <p> This task takes the given record definition files and compiles them into |
| java or c++ |
| files. It is then up to the user to compile the generated files. |
| |
| <p> The task requires the <code>file</code> or the nested fileset element to be |
| specified. Optional attributes are <code>language</code> (set the output |
| language, default is "java"), |
| <code>destdir</code> (name of the destination directory for generated java/c++ |
| code, default is ".") and <code>failonerror</code> (specifies error handling |
| behavior. default is true). |
| <p><h4>Usage</h4> |
| <pre> |
| <recordcc |
| destdir="${basedir}/gensrc" |
| language="java"> |
| <fileset include="**\/*.jr" /> |
| </recordcc> |
| </pre>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.ant.RccTask --> |
| </package> |
| <package name="org.apache.hadoop.record.compiler.generated"> |
| <!-- start class org.apache.hadoop.record.compiler.generated.ParseException --> |
| <class name="ParseException" extends="java.lang.Exception" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ParseException" type="org.apache.hadoop.record.compiler.generated.Token, int[][], java.lang.String[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[This constructor is used by the method "generateParseException" |
| in the generated parser. Calling this constructor generates |
| a new object of this type with the fields "currentToken", |
| "expectedTokenSequences", and "tokenImage" set. The boolean |
| flag "specialConstructor" is also set to true to indicate that |
| this constructor was used to create this object. |
| This constructor calls its super class with the empty string |
| to force the "toString" method of parent class "Throwable" to |
| print the error message in the form: |
| ParseException: <result of getMessage>]]> |
| </doc> |
| </constructor> |
| <constructor name="ParseException" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The following constructors are for use by you for whatever |
| purpose you can think of. Constructing the exception in this |
| manner makes the exception behave in the normal way - i.e., as |
| documented in the class "Throwable". The fields "errorToken", |
| "expectedTokenSequences", and "tokenImage" do not contain |
| relevant information. The JavaCC generated code does not use |
| these constructors.]]> |
| </doc> |
| </constructor> |
| <constructor name="ParseException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getMessage" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[This method has the standard behavior when this object has been |
| created using the standard constructors. Otherwise, it uses |
| "currentToken" and "expectedTokenSequences" to generate a parse |
| error message and returns it. If this object has been created |
| due to a parse error, and you do not catch it (it gets thrown |
| from the parser), then this method is called during the printing |
| of the final stack trace, and hence the correct error message |
| gets displayed.]]> |
| </doc> |
| </method> |
| <method name="add_escapes" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Used to convert raw characters to their escaped version |
when these raw versions cannot be used as part of an ASCII
| string literal.]]> |
| </doc> |
| </method> |
| <field name="specialConstructor" type="boolean" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[This variable determines which constructor was used to create |
| this object and thereby affects the semantics of the |
| "getMessage" method (see below).]]> |
| </doc> |
| </field> |
| <field name="currentToken" type="org.apache.hadoop.record.compiler.generated.Token" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[This is the last token that has been consumed successfully. If |
| this object has been created due to a parse error, the token |
following this token will (therefore) be the first error token.]]>
| </doc> |
| </field> |
| <field name="expectedTokenSequences" type="int[][]" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Each entry in this array is an array of integers. Each array |
| of integers represents a sequence of tokens (by their ordinal |
| values) that is expected at this point of the parse.]]> |
| </doc> |
| </field> |
| <field name="tokenImage" type="java.lang.String[]" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[This is a reference to the "tokenImage" array of the generated |
| parser within which the parse error occurred. This array is |
| defined in the generated ...Constants interface.]]> |
| </doc> |
| </field> |
| <field name="eol" type="java.lang.String" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The end of line string for this machine.]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[This exception is thrown when parse errors are encountered. |
| You can explicitly create objects of this exception type by |
| calling the method generateParseException in the generated |
| parser. |
| |
| You can modify this class to customize your error reporting |
| mechanisms so long as you retain the public fields.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.generated.ParseException --> |
| <!-- start class org.apache.hadoop.record.compiler.generated.Rcc --> |
| <class name="Rcc" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/> |
| <constructor name="Rcc" type="java.io.InputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="Rcc" type="java.io.InputStream, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="Rcc" type="java.io.Reader" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="Rcc" type="org.apache.hadoop.record.compiler.generated.RccTokenManager" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| </method> |
| <method name="usage" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="driver" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| </method> |
| <method name="Input" return="org.apache.hadoop.record.compiler.JFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/> |
| </method> |
| <method name="Include" return="org.apache.hadoop.record.compiler.JFile" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/> |
| </method> |
| <method name="Module" return="java.util.ArrayList<org.apache.hadoop.record.compiler.JRecord>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/> |
| </method> |
| <method name="ModuleName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/> |
| </method> |
| <method name="RecordList" return="java.util.ArrayList<org.apache.hadoop.record.compiler.JRecord>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/> |
| </method> |
| <method name="Record" return="org.apache.hadoop.record.compiler.JRecord" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/> |
| </method> |
| <method name="Field" return="org.apache.hadoop.record.compiler.JField<org.apache.hadoop.record.compiler.JType>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/> |
| </method> |
| <method name="Type" return="org.apache.hadoop.record.compiler.JType" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/> |
| </method> |
| <method name="Map" return="org.apache.hadoop.record.compiler.JMap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/> |
| </method> |
| <method name="Vector" return="org.apache.hadoop.record.compiler.JVector" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="ParseException" type="org.apache.hadoop.record.compiler.generated.ParseException"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="java.io.InputStream"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="java.io.InputStream"/> |
| <param name="encoding" type="java.lang.String"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="java.io.Reader"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tm" type="org.apache.hadoop.record.compiler.generated.RccTokenManager"/> |
| </method> |
| <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getToken" return="org.apache.hadoop.record.compiler.generated.Token" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="index" type="int"/> |
| </method> |
| <method name="generateParseException" return="org.apache.hadoop.record.compiler.generated.ParseException" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="enable_tracing" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="disable_tracing" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <field name="token_source" type="org.apache.hadoop.record.compiler.generated.RccTokenManager" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="token" type="org.apache.hadoop.record.compiler.generated.Token" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="jj_nt" type="org.apache.hadoop.record.compiler.generated.Token" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.generated.Rcc --> |
| <!-- start interface org.apache.hadoop.record.compiler.generated.RccConstants --> |
| <interface name="RccConstants" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <field name="EOF" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="MODULE_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="RECORD_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="INCLUDE_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="BYTE_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="BOOLEAN_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="INT_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LONG_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="FLOAT_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DOUBLE_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="USTRING_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="BUFFER_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="VECTOR_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="MAP_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LBRACE_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="RBRACE_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LT_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="GT_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="SEMICOLON_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="COMMA_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DOT_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="CSTRING_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="IDENT_TKN" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DEFAULT" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="WithinOneLineComment" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="WithinMultiLineComment" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="tokenImage" type="java.lang.String[]" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| </interface> |
| <!-- end interface org.apache.hadoop.record.compiler.generated.RccConstants --> |
| <!-- start class org.apache.hadoop.record.compiler.generated.RccTokenManager --> |
| <class name="RccTokenManager" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.record.compiler.generated.RccConstants"/> |
| <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="RccTokenManager" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setDebugStream" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ds" type="java.io.PrintStream"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream"/> |
| <param name="lexState" type="int"/> |
| </method> |
| <method name="SwitchTo" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="lexState" type="int"/> |
| </method> |
| <method name="jjFillToken" return="org.apache.hadoop.record.compiler.generated.Token" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getNextToken" return="org.apache.hadoop.record.compiler.generated.Token" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <field name="debugStream" type="java.io.PrintStream" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="jjstrLiteralImages" type="java.lang.String[]" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="lexStateNames" type="java.lang.String[]" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="jjnewLexState" type="int[]" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="input_stream" type="org.apache.hadoop.record.compiler.generated.SimpleCharStream" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="curChar" type="char" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.generated.RccTokenManager --> |
| <!-- start class org.apache.hadoop.record.compiler.generated.SimpleCharStream --> |
| <class name="SimpleCharStream" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="SimpleCharStream" type="java.io.Reader, int, int, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="SimpleCharStream" type="java.io.Reader, int, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="SimpleCharStream" type="java.io.Reader" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/> |
| </constructor> |
| <constructor name="SimpleCharStream" type="java.io.InputStream, int, int, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String, int, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/> |
| </constructor> |
| <constructor name="SimpleCharStream" type="java.io.InputStream, int, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="SimpleCharStream" type="java.io.InputStream, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/> |
| </constructor> |
| <constructor name="SimpleCharStream" type="java.io.InputStream" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setTabSize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| </method> |
| <method name="getTabSize" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| </method> |
| <method name="ExpandBuff" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="wrapAround" type="boolean"/> |
| </method> |
| <method name="FillBuff" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="BeginToken" return="char" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="UpdateLineColumn" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="c" type="char"/> |
| </method> |
| <method name="readChar" return="char" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getEndColumn" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getEndLine" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getBeginColumn" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getBeginLine" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="backup" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="amount" type="int"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dstream" type="java.io.Reader"/> |
| <param name="startline" type="int"/> |
| <param name="startcolumn" type="int"/> |
| <param name="buffersize" type="int"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dstream" type="java.io.Reader"/> |
| <param name="startline" type="int"/> |
| <param name="startcolumn" type="int"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dstream" type="java.io.Reader"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dstream" type="java.io.InputStream"/> |
| <param name="encoding" type="java.lang.String"/> |
| <param name="startline" type="int"/> |
| <param name="startcolumn" type="int"/> |
| <param name="buffersize" type="int"/> |
| <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dstream" type="java.io.InputStream"/> |
| <param name="startline" type="int"/> |
| <param name="startcolumn" type="int"/> |
| <param name="buffersize" type="int"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dstream" type="java.io.InputStream"/> |
| <param name="encoding" type="java.lang.String"/> |
| <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dstream" type="java.io.InputStream"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dstream" type="java.io.InputStream"/> |
| <param name="encoding" type="java.lang.String"/> |
| <param name="startline" type="int"/> |
| <param name="startcolumn" type="int"/> |
| <exception name="UnsupportedEncodingException" type="java.io.UnsupportedEncodingException"/> |
| </method> |
| <method name="ReInit" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dstream" type="java.io.InputStream"/> |
| <param name="startline" type="int"/> |
| <param name="startcolumn" type="int"/> |
| </method> |
| <method name="GetImage" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="GetSuffix" return="char[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="len" type="int"/> |
| </method> |
| <method name="Done" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="adjustBeginLineColumn" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="newLine" type="int"/> |
| <param name="newCol" type="int"/> |
| <doc> |
| <![CDATA[Method to adjust line and column numbers for the start of a token.]]> |
| </doc> |
| </method> |
| <field name="staticFlag" type="boolean" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="bufpos" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="bufline" type="int[]" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="bufcolumn" type="int[]" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="column" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="line" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="prevCharIsCR" type="boolean" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="prevCharIsLF" type="boolean" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="inputStream" type="java.io.Reader" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="buffer" type="char[]" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="maxNextCharInd" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="inBuf" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <field name="tabSize" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[An implementation of interface CharStream, where the stream is assumed to |
| contain only ASCII characters (without unicode processing).]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.generated.SimpleCharStream --> |
| <!-- start class org.apache.hadoop.record.compiler.generated.Token --> |
| <class name="Token" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Token" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the image.]]> |
| </doc> |
| </method> |
| <method name="newToken" return="org.apache.hadoop.record.compiler.generated.Token" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ofKind" type="int"/> |
| <doc> |
| <![CDATA[Returns a new Token object, by default. However, if you want, you |
| can create and return subclass objects based on the value of ofKind. |
| Simply add the cases to the switch for all those special cases. |
| For example, if you have a subclass of Token called IDToken that |
| you want to create if ofKind is ID, simply add something like : |
| |
| case MyParserConstants.ID : return new IDToken(); |
| |
| to the following switch statement. Then you can cast matchedToken |
| variable to the appropriate type and use it in your lexical actions.]]> |
| </doc> |
| </method> |
| <field name="kind" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[An integer that describes the kind of this token. This numbering |
| system is determined by JavaCCParser, and a table of these numbers is |
| stored in the file ...Constants.java.]]> |
| </doc> |
| </field> |
| <field name="beginLine" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[beginLine and beginColumn describe the position of the first character |
| of this token; endLine and endColumn describe the position of the |
| last character of this token.]]> |
| </doc> |
| </field> |
| <field name="beginColumn" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[beginLine and beginColumn describe the position of the first character |
| of this token; endLine and endColumn describe the position of the |
| last character of this token.]]> |
| </doc> |
| </field> |
| <field name="endLine" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[beginLine and beginColumn describe the position of the first character |
| of this token; endLine and endColumn describe the position of the |
| last character of this token.]]> |
| </doc> |
| </field> |
| <field name="endColumn" type="int" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[beginLine and beginColumn describe the position of the first character |
| of this token; endLine and endColumn describe the position of the |
| last character of this token.]]> |
| </doc> |
| </field> |
| <field name="image" type="java.lang.String" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The string image of the token.]]> |
| </doc> |
| </field> |
| <field name="next" type="org.apache.hadoop.record.compiler.generated.Token" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[A reference to the next regular (non-special) token from the input |
| stream. If this is the last token from the input stream, or if the |
| token manager has not read tokens beyond this one, this field is |
| set to null. This is true only if this token is also a regular |
| token. Otherwise, see below for a description of the contents of |
| this field.]]> |
| </doc> |
| </field> |
| <field name="specialToken" type="org.apache.hadoop.record.compiler.generated.Token" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[This field is used to access special tokens that occur prior to this |
| token, but after the immediately preceding regular (non-special) token. |
| If there are no such special tokens, this field is set to null. |
| When there is more than one such special token, this field refers |
| to the last of these special tokens, which in turn refers to the next |
| previous special token through its specialToken field, and so on |
| until the first special token (whose specialToken field is null). |
| The next fields of special tokens refer to other special tokens that |
| immediately follow it (without an intervening regular token). If there |
| is no such token, this field is null.]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[Describes the input token stream.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.generated.Token --> |
| <!-- start class org.apache.hadoop.record.compiler.generated.TokenMgrError --> |
| <class name="TokenMgrError" extends="java.lang.Error" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="TokenMgrError" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="TokenMgrError" type="java.lang.String, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="TokenMgrError" type="boolean, int, int, int, java.lang.String, char, int" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="addEscapes" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Replaces unprintable characters by their escaped (or unicode escaped) |
| equivalents in the given string]]> |
| </doc> |
| </method> |
| <method name="LexicalError" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="EOFSeen" type="boolean"/> |
| <param name="lexState" type="int"/> |
| <param name="errorLine" type="int"/> |
| <param name="errorColumn" type="int"/> |
| <param name="errorAfter" type="java.lang.String"/> |
| <param name="curChar" type="char"/> |
| <doc> |
| <![CDATA[Returns a detailed message for the Error when it is thrown by the |
| token manager to indicate a lexical error. |
| Parameters : |
| EOFSeen : indicates if EOF caused the lexical error |
| curLexState : lexical state in which this error occurred |
| errorLine : line number when the error occurred |
| errorColumn : column number when the error occurred |
| errorAfter : prefix that was seen before this error occurred |
| curchar : the offending character |
| Note: You can customize the lexical error message by modifying this method.]]> |
| </doc> |
| </method> |
| <method name="getMessage" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[You can also modify the body of this method to customize your error messages. |
| For example, cases like LOOP_DETECTED and INVALID_LEXICAL_STATE are not |
| of end-users concern, so you can return something like : |
| |
| "Internal Error : Please file a bug report .... " |
| |
| from this method for such cases in the release version of your parser.]]> |
| </doc> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.record.compiler.generated.TokenMgrError --> |
| <doc> |
| <![CDATA[This package contains code generated by JavaCC from the |
| Hadoop record syntax file rcc.jj. For details about the |
| record file syntax please @see org.apache.hadoop.record.]]> |
| </doc> |
| </package> |
| <package name="org.apache.hadoop.record.meta"> |
| <!-- start class org.apache.hadoop.record.meta.FieldTypeInfo --> |
| <class name="FieldTypeInfo" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getTypeID" return="org.apache.hadoop.record.meta.TypeID" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[get the field's TypeID object]]> |
| </doc> |
| </method> |
| <method name="getFieldID" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[get the field's id (name)]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Two FieldTypeInfos are equal if each of their fields matches]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[We use a basic hashcode implementation, since this class will likely not |
| be used as a hashmap key]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ti" type="org.apache.hadoop.record.meta.FieldTypeInfo"/> |
| </method> |
| <doc> |
| <![CDATA[Represents a type information for a field, which is made up of its |
| ID (name) and its type (a TypeID object).]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.meta.FieldTypeInfo --> |
| <!-- start class org.apache.hadoop.record.meta.MapTypeID --> |
| <class name="MapTypeID" extends="org.apache.hadoop.record.meta.TypeID" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MapTypeID" type="org.apache.hadoop.record.meta.TypeID, org.apache.hadoop.record.meta.TypeID" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getKeyTypeID" return="org.apache.hadoop.record.meta.TypeID" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[get the TypeID of the map's key element]]> |
| </doc> |
| </method> |
| <method name="getValueTypeID" return="org.apache.hadoop.record.meta.TypeID" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[get the TypeID of the map's value element]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Two map typeIDs are equal if their constituent elements have the |
| same type]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[We use a basic hashcode implementation, since this class will likely not |
| be used as a hashmap key]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Represents typeID for a Map]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.meta.MapTypeID --> |
| <!-- start class org.apache.hadoop.record.meta.RecordTypeInfo --> |
| <class name="RecordTypeInfo" extends="org.apache.hadoop.record.Record" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="RecordTypeInfo" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create an empty RecordTypeInfo object.]]> |
| </doc> |
| </constructor> |
| <constructor name="RecordTypeInfo" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a RecordTypeInfo object representing a record with the given name |
| @param name Name of the record]]> |
| </doc> |
| </constructor> |
| <method name="getName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[return the name of the record]]> |
| </doc> |
| </method> |
| <method name="setName" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[set the name of the record]]> |
| </doc> |
| </method> |
| <method name="addField" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fieldName" type="java.lang.String"/> |
| <param name="tid" type="org.apache.hadoop.record.meta.TypeID"/> |
| <doc> |
| <![CDATA[Add a field. |
| @param fieldName Name of the field |
| @param tid Type ID of the field]]> |
| </doc> |
| </method> |
| <method name="getFieldTypeInfos" return="java.util.Collection<org.apache.hadoop.record.meta.FieldTypeInfo>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return a collection of field type infos]]> |
| </doc> |
| </method> |
| <method name="getNestedStructTypeInfo" return="org.apache.hadoop.record.meta.RecordTypeInfo" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Return the type info of a nested record. We only consider nesting |
| to one level. |
| @param name Name of the nested record]]> |
| </doc> |
| </method> |
| <method name="serialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="rout" type="org.apache.hadoop.record.RecordOutput"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Serialize the type information for a record]]> |
| </doc> |
| </method> |
| <method name="deserialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="rin" type="org.apache.hadoop.record.RecordInput"/> |
| <param name="tag" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Deserialize the type information for a record]]> |
| </doc> |
| </method> |
| <method name="compareTo" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="peer_" type="java.lang.Object"/> |
| <exception name="ClassCastException" type="java.lang.ClassCastException"/> |
| <doc> |
| <![CDATA[This class doesn't implement Comparable as it's not meant to be used |
| for anything besides de/serializing. |
| So we always throw an exception. |
| Not implemented. Always returns 0 if another RecordTypeInfo is passed in.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A record's Type Information object which can read/write itself. |
| |
| Type information for a record comprises metadata about the record, |
| as well as a collection of type information for each field in the record.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.meta.RecordTypeInfo --> |
| <!-- start class org.apache.hadoop.record.meta.StructTypeID --> |
| <class name="StructTypeID" extends="org.apache.hadoop.record.meta.TypeID" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="StructTypeID" type="org.apache.hadoop.record.meta.RecordTypeInfo" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a StructTypeID based on the RecordTypeInfo of some record]]> |
| </doc> |
| </constructor> |
| <method name="getFieldTypeInfos" return="java.util.Collection<org.apache.hadoop.record.meta.FieldTypeInfo>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[Represents typeID for a struct]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.meta.StructTypeID --> |
| <!-- start class org.apache.hadoop.record.meta.TypeID --> |
| <class name="TypeID" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="getTypeVal" return="byte" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the type value. One of the constants in RIOType.]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Two base typeIDs are equal if they refer to the same type]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[We use a basic hashcode implementation, since this class will likely not |
| be used as a hashmap key]]> |
| </doc> |
| </method> |
| <field name="BoolTypeID" type="org.apache.hadoop.record.meta.TypeID" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constant classes for the basic types, so we can share them.]]> |
| </doc> |
| </field> |
| <field name="BufferTypeID" type="org.apache.hadoop.record.meta.TypeID" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="ByteTypeID" type="org.apache.hadoop.record.meta.TypeID" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DoubleTypeID" type="org.apache.hadoop.record.meta.TypeID" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="FloatTypeID" type="org.apache.hadoop.record.meta.TypeID" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="IntTypeID" type="org.apache.hadoop.record.meta.TypeID" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LongTypeID" type="org.apache.hadoop.record.meta.TypeID" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="StringTypeID" type="org.apache.hadoop.record.meta.TypeID" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="typeVal" type="byte" |
| transient="false" volatile="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[Represents typeID for basic types.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.meta.TypeID --> |
| <!-- start class org.apache.hadoop.record.meta.TypeID.RIOType --> |
| <class name="TypeID.RIOType" extends="java.lang.Object" |
| abstract="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="TypeID.RIOType" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <field name="BOOL" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="BUFFER" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="BYTE" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="DOUBLE" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="FLOAT" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="INT" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="LONG" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="MAP" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="STRING" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="STRUCT" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="VECTOR" type="byte" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[constants representing the IDL types we support]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.meta.TypeID.RIOType --> |
| <!-- start class org.apache.hadoop.record.meta.Utils --> |
| <class name="Utils" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="skip" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="rin" type="org.apache.hadoop.record.RecordInput"/> |
| <param name="tag" type="java.lang.String"/> |
| <param name="typeID" type="org.apache.hadoop.record.meta.TypeID"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[read/skip bytes from stream based on a type]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Various utility functions for the Hadoop record I/O platform.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.meta.Utils --> |
| <!-- start class org.apache.hadoop.record.meta.VectorTypeID --> |
| <class name="VectorTypeID" extends="org.apache.hadoop.record.meta.TypeID" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="VectorTypeID" type="org.apache.hadoop.record.meta.TypeID" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getElementTypeID" return="org.apache.hadoop.record.meta.TypeID" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="o" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Two vector typeIDs are equal if their constituent elements have the |
| same type]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[We use a basic hashcode implementation, since this class will likely not |
| be used as a hashmap key]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Represents typeID for vector.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.record.meta.VectorTypeID --> |
| </package> |
| <package name="org.apache.hadoop.security"> |
| <!-- start class org.apache.hadoop.security.UnixUserGroupInformation --> |
| <class name="UnixUserGroupInformation" extends="org.apache.hadoop.security.UserGroupInformation" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="UnixUserGroupInformation" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Default constructor]]> |
| </doc> |
| </constructor> |
| <constructor name="UnixUserGroupInformation" type="java.lang.String, java.lang.String[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor with parameters user name and its group names. |
| The first entry in the groups list is the default group. |
| |
| @param userName a user's name |
| @param groupNames groups list, first of which is the default group |
| @exception IllegalArgumentException if any argument is null]]> |
| </doc> |
| </constructor> |
| <constructor name="UnixUserGroupInformation" type="java.lang.String[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Constructor with parameter user/group names |
| |
| @param ugi an array containing user/group names, the first |
| element of which is the user name, the second of |
| which is the default group name. |
| @exception IllegalArgumentException if the array size is less than 2 |
| or any element is null.]]> |
| </doc> |
| </constructor> |
| <method name="createImmutable" return="org.apache.hadoop.security.UnixUserGroupInformation" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ugi" type="java.lang.String[]"/> |
| <doc> |
| <![CDATA[Create an immutable {@link UnixUserGroupInformation} object.]]> |
| </doc> |
| </method> |
| <method name="getGroupNames" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return an array of group names]]> |
| </doc> |
| </method> |
| <method name="getUserName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return the user's name]]> |
| </doc> |
| </method> |
| <method name="readFields" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="in" type="java.io.DataInput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Deserialize this object |
| First check if this is a UGI in the string format. |
| If no, throw an IOException; otherwise |
| set this object's fields by reading them from the given data input |
| |
| @param in input stream |
| @exception IOException is thrown if encounter any error when reading]]> |
| </doc> |
| </method> |
| <method name="write" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.DataOutput"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Serialize this object |
| First write a string marking that this is a UGI in the string format, |
| then write this object's serialized form to the given data output |
| |
| @param out output stream |
| @exception IOException if encounter any error during writing]]> |
| </doc> |
| </method> |
| <method name="saveToConf" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="attr" type="java.lang.String"/> |
| <param name="ugi" type="org.apache.hadoop.security.UnixUserGroupInformation"/> |
| <doc> |
| <![CDATA[Store the given <code>ugi</code> as a comma separated string in |
| <code>conf</code> as a property <code>attr</code> |
| |
| The String starts with the user name followed by the default group names, |
| and other group names. |
| |
| @param conf configuration |
| @param attr property name |
| @param ugi a UnixUserGroupInformation]]> |
| </doc> |
| </method> |
| <method name="readFromConf" return="org.apache.hadoop.security.UnixUserGroupInformation" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="attr" type="java.lang.String"/> |
| <exception name="LoginException" type="javax.security.auth.login.LoginException"/> |
| <doc> |
| <![CDATA[Read a UGI from the given <code>conf</code> |
| |
| The object is expected to store with the property name <code>attr</code> |
| as a comma separated string that starts |
| with the user name followed by group names. |
| If the property name is not defined, return null. |
| It's assumed that there is only one UGI per user. If this user already |
| has a UGI in the ugi map, return the ugi in the map. |
| Otherwise, construct a UGI from the configuration, store it in the |
| ugi map and return it. |
| |
| @param conf configuration |
| @param attr property name |
| @return a UnixUGI |
| @throws LoginException if the stored string is ill-formatted.]]> |
| </doc> |
| </method> |
| <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="LoginException" type="javax.security.auth.login.LoginException"/> |
| <doc> |
| <![CDATA[Get current user's name and the names of all its groups from Unix. |
| It's assumed that there is only one UGI per user. If this user already |
| has a UGI in the ugi map, return the ugi in the map. |
| Otherwise get the current user's information from Unix, store it |
| in the map, and return it.]]> |
| </doc> |
| </method> |
| <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="LoginException" type="javax.security.auth.login.LoginException"/> |
| <doc> |
| <![CDATA[Equivalent to login(conf, false).]]> |
| </doc> |
| </method> |
| <method name="login" return="org.apache.hadoop.security.UnixUserGroupInformation" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="save" type="boolean"/> |
| <exception name="LoginException" type="javax.security.auth.login.LoginException"/> |
| <doc> |
| <![CDATA[Get a user's name & its group names from the given configuration; |
| If it is not defined in the configuration, get the current user's |
| information from Unix. |
| If the user has a UGI in the ugi map, return the one in |
| the UGI map. |
| |
| @param conf either a job configuration or client's configuration |
| @param save saving it to conf? |
| @return UnixUserGroupInformation a user/group information |
| @exception LoginException if not able to get the user/group information]]> |
| </doc> |
| </method> |
| <method name="equals" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="other" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Decide if two UGIs are the same |
| |
| @param other other object |
| @return true if they are the same; false otherwise.]]> |
| </doc> |
| </method> |
| <method name="hashCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns a hash code for this UGI. |
| The hash code for a UGI is the hash code of its user name string. |
| |
| @return a hash code value for this UGI.]]> |
| </doc> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Convert this object to a string |
| |
| @return a comma separated string containing the user name and group names]]> |
| </doc> |
| </method> |
| <field name="UGI_PROPERTY_NAME" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[An implementation of UserGroupInformation in the Unix system]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.security.UnixUserGroupInformation --> |
| <!-- start class org.apache.hadoop.security.UserGroupInformation --> |
| <class name="UserGroupInformation" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.io.Writable"/> |
| <constructor name="UserGroupInformation" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getCurrentUGI" return="org.apache.hadoop.security.UserGroupInformation" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@return the {@link UserGroupInformation} for the current thread]]> |
| </doc> |
| </method> |
| <method name="setCurrentUGI" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/> |
| <doc> |
| <![CDATA[Set the {@link UserGroupInformation} for the current thread]]> |
| </doc> |
| </method> |
| <method name="getUserName" return="java.lang.String" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get username |
| |
| @return the user's name]]> |
| </doc> |
| </method> |
| <method name="getGroupNames" return="java.lang.String[]" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the names of the groups that the user belongs to |
| |
| @return an array of group names]]> |
| </doc> |
| </method> |
| <method name="login" return="org.apache.hadoop.security.UserGroupInformation" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="LoginException" type="javax.security.auth.login.LoginException"/> |
| <doc> |
| <![CDATA[Login and return a UserGroupInformation object.]]> |
| </doc> |
| </method> |
| <method name="readFrom" return="org.apache.hadoop.security.UserGroupInformation" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Read a {@link UserGroupInformation} from conf]]> |
| </doc> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[A {@link Writable} abstract class for storing user and groups information.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.security.UserGroupInformation --> |
| </package> |
| <package name="org.apache.hadoop.tools"> |
| <!-- start class org.apache.hadoop.tools.Logalyzer --> |
| <class name="Logalyzer" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Logalyzer" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="doArchive" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="logListURI" type="java.lang.String"/> |
| <param name="archiveDirectory" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[doArchive: Workhorse function to archive log-files. |
| @param logListURI : The uri which will serve list of log-files to archive. |
| @param archiveDirectory : The directory to store archived logfiles. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="doAnalyze" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="inputFilesDirectory" type="java.lang.String"/> |
| <param name="outputDirectory" type="java.lang.String"/> |
| <param name="grepPattern" type="java.lang.String"/> |
| <param name="sortColumns" type="java.lang.String"/> |
| <param name="columnSeparator" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[doAnalyze: |
| @param inputFilesDirectory : Directory containing the files to be analyzed. |
| @param outputDirectory : Directory to store analysis (output). |
| @param grepPattern : Pattern to *grep* for. |
| @param sortColumns : Sort specification for output. |
| @param columnSeparator : Column separator. |
| @throws IOException]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| </method> |
| <doc> |
| <![CDATA[Logalyzer: A utility tool for archiving and analyzing hadoop logs. |
| <p> |
| This tool supports archiving and analyzing (sort/grep) of log-files. |
| It takes as input |
| a) Input uri which will serve uris of the logs to be archived. |
| b) Output directory (not mandatory). |
| c) Directory on dfs to archive the logs. |
| d) The sort/grep patterns for analyzing the files and separator for boundaries. |
| Usage: |
| Logalyzer -archive -archiveDir <directory to archive logs> -analysis <directory> -logs <log-list uri> -grep <pattern> -sort <col1, col2> -separator <separator> |
| <p>]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.tools.Logalyzer --> |
| <!-- start class org.apache.hadoop.tools.Logalyzer.LogComparator --> |
| <class name="Logalyzer.LogComparator" extends="org.apache.hadoop.io.Text.Comparator" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.conf.Configurable"/> |
| <constructor name="Logalyzer.LogComparator" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="b1" type="byte[]"/> |
| <param name="s1" type="int"/> |
| <param name="l1" type="int"/> |
| <param name="b2" type="byte[]"/> |
| <param name="s2" type="int"/> |
| <param name="l2" type="int"/> |
| </method> |
| <doc> |
| <![CDATA[A WritableComparator optimized for UTF8 keys of the logs.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.tools.Logalyzer.LogComparator --> |
| <!-- start class org.apache.hadoop.tools.Logalyzer.LogRegexMapper --> |
| <class name="Logalyzer.LogRegexMapper" extends="org.apache.hadoop.mapred.MapReduceBase" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.mapred.Mapper<K, org.apache.hadoop.io.Text, org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable>"/> |
| <constructor name="Logalyzer.LogRegexMapper" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="configure" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| </method> |
| <method name="map" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="key" type="K extends org.apache.hadoop.io.WritableComparable"/> |
| <param name="value" type="org.apache.hadoop.io.Text"/> |
| <param name="output" type="org.apache.hadoop.mapred.OutputCollector<org.apache.hadoop.io.Text, org.apache.hadoop.io.LongWritable>"/> |
| <param name="reporter" type="org.apache.hadoop.mapred.Reporter"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <doc> |
| <![CDATA[A {@link Mapper} that extracts text matching a regular expression.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.tools.Logalyzer.LogRegexMapper --> |
| </package> |
| <package name="org.apache.hadoop.util"> |
| <!-- start class org.apache.hadoop.util.CopyFiles --> |
| <class name="CopyFiles" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.util.Tool"/> |
| <constructor name="CopyFiles" type="org.apache.hadoop.conf.Configuration" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| </method> |
| <method name="getConf" return="org.apache.hadoop.conf.Configuration" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="copy" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="srcPath" type="java.lang.String"/> |
| <param name="destPath" type="java.lang.String"/> |
| <param name="logPath" type="org.apache.hadoop.fs.Path"/> |
| <param name="srcAsList" type="boolean"/> |
| <param name="ignoreReadFailures" type="boolean"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="run" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[This is the main driver for recursively copying directories |
| across file systems. It takes at least two cmdline parameters. A source |
| URL and a destination URL. It then essentially does an "ls -lR" on the |
| source URL, and writes the output in a round-robin manner to all the map |
| input files. The mapper actually copies the files allotted to it. The |
| reduce is empty.]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| </method> |
| <doc> |
| <![CDATA[A Map-reduce program to recursively copy directories between |
| different file-systems.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.CopyFiles --> |
| <!-- start class org.apache.hadoop.util.CopyFiles.DuplicationException --> |
| <class name="CopyFiles.DuplicationException" extends="java.io.IOException" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <field name="ERROR_CODE" type="int" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Error code for this exception]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[An exception class for duplicated source files.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.CopyFiles.DuplicationException --> |
| <!-- start class org.apache.hadoop.util.Daemon --> |
| <class name="Daemon" extends="java.lang.Thread" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Daemon" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a daemon thread.]]> |
| </doc> |
| </constructor> |
| <constructor name="Daemon" type="java.lang.Runnable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a daemon thread.]]> |
| </doc> |
| </constructor> |
| <constructor name="Daemon" type="java.lang.ThreadGroup, java.lang.Runnable" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Construct a daemon thread to be part of a specified thread group.]]> |
| </doc> |
| </constructor> |
| <method name="getRunnable" return="java.lang.Runnable" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[A thread that has called {@link Thread#setDaemon(boolean) } with true.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.Daemon --> |
| <!-- start class org.apache.hadoop.util.DiskChecker --> |
| <class name="DiskChecker" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DiskChecker" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="mkdirsWithExistsCheck" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dir" type="java.io.File"/> |
| <doc> |
| <![CDATA[The semantics of mkdirsWithExistsCheck method is different from the mkdirs |
| method provided in the Sun's java.io.File class in the following way: |
| While creating the non-existent parent directories, this method checks for |
| the existence of those directories if the mkdir fails at any point (since |
| that directory might have just been created by some other process). |
| If both mkdir() and the exists() check fail for any seemingly |
| non-existent directory, then we signal an error; Sun's mkdir would signal |
| an error (return false) if a directory it is attempting to create already |
| exists or the mkdir fails. |
| @param dir |
| @return true on success, false on failure]]> |
| </doc> |
| </method> |
| <method name="checkDir" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dir" type="java.io.File"/> |
| <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/> |
| </method> |
| <doc> |
| <![CDATA[Class that provides utility functions for checking for disk problems]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.DiskChecker --> |
| <!-- start class org.apache.hadoop.util.DiskChecker.DiskErrorException --> |
| <class name="DiskChecker.DiskErrorException" extends="java.io.IOException" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DiskChecker.DiskErrorException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| </class> |
| <!-- end class org.apache.hadoop.util.DiskChecker.DiskErrorException --> |
| <!-- start class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException --> |
| <class name="DiskChecker.DiskOutOfSpaceException" extends="java.io.IOException" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="DiskChecker.DiskOutOfSpaceException" type="java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| </class> |
| <!-- end class org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException --> |
| <!-- start class org.apache.hadoop.util.GenericOptionsParser --> |
| <class name="GenericOptionsParser" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, java.lang.String[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a <code>GenericOptionsParser</code> to parse only the generic Hadoop |
| arguments. |
| |
| The array of string arguments other than the generic arguments can be |
| obtained by {@link #getRemainingArgs()}. |
| |
| @param conf the <code>Configuration</code> to modify. |
| @param args command-line arguments.]]> |
| </doc> |
| </constructor> |
| <constructor name="GenericOptionsParser" type="org.apache.hadoop.conf.Configuration, org.apache.commons.cli.Options, java.lang.String[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Create a <code>GenericOptionsParser</code> to parse given options as well |
| as generic Hadoop options. |
| |
| The resulting <code>CommandLine</code> object can be obtained by |
| {@link #getCommandLine()}. |
| |
| @param conf the configuration to modify |
| @param options options built by the caller |
| @param args User-specified arguments]]> |
| </doc> |
| </constructor> |
| <method name="getRemainingArgs" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns an array of Strings containing only application-specific arguments. |
| |
| @return array of <code>String</code>s containing the un-parsed arguments.]]> |
| </doc> |
| </method> |
| <method name="getCommandLine" return="org.apache.commons.cli.CommandLine" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the commons-cli <code>CommandLine</code> object |
| to process the parsed arguments. |
| |
| Note: If the object is created with |
| {@link #GenericOptionsParser(Configuration, String[])}, then returned |
| object will only contain parsed generic options. |
| |
| @return <code>CommandLine</code> representing list of arguments |
| parsed against Options descriptor.]]> |
| </doc> |
| </method> |
| <method name="printGenericCommandUsage" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.PrintStream"/> |
| <doc> |
| <![CDATA[Print the usage message for generic command-line options supported. |
| |
| @param out stream to print the usage message to.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[<code>GenericOptionsParser</code> is a utility to parse command line |
| arguments generic to the Hadoop framework. |
| |
| <code>GenericOptionsParser</code> recognizes several standard command |
| line arguments, enabling applications to easily specify a namenode, a |
| jobtracker, additional configuration resources etc. |
| |
| <h4 id="GenericOptions">Generic Options</h4> |
| |
| <p>The supported generic options are:</p> |
| <p><blockquote><pre> |
| -conf <configuration file> specify a configuration file |
| -D <property=value> use value for given property |
| -fs <local|namenode:port> specify a namenode |
| -jt <local|jobtracker:port> specify a job tracker |
| </pre></blockquote></p> |
| |
| <p>The general command line syntax is:</p> |
| <p><tt><pre> |
| bin/hadoop command [genericOptions] [commandOptions] |
| </pre></tt></p> |
| |
| <p>Generic command line arguments <strong>might</strong> modify |
| <code>Configuration </code> objects, given to constructors.</p> |
| |
| <p>The functionality is implemented using Commons CLI.</p> |
| |
| <p>Examples:</p> |
| <p><blockquote><pre> |
| $ bin/hadoop dfs -fs darwin:8020 -ls /data |
| list /data directory in dfs with namenode darwin:8020 |
| |
| $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data |
| list /data directory in dfs with namenode darwin:8020 |
| |
| $ bin/hadoop dfs -conf hadoop-site.xml -ls /data |
| list /data directory in dfs with conf specified in hadoop-site.xml |
| |
| $ bin/hadoop job -D mapred.job.tracker=darwin:50020 -submit job.xml |
| submit a job to job tracker darwin:50020 |
| |
| $ bin/hadoop job -jt darwin:50020 -submit job.xml |
| submit a job to job tracker darwin:50020 |
| |
| $ bin/hadoop job -jt local -submit job.xml |
| submit a job to local runner |
| </pre></blockquote></p> |
| |
| @see Tool |
| @see ToolRunner]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.GenericOptionsParser --> |
| <!-- start class org.apache.hadoop.util.GenericsUtil --> |
| <class name="GenericsUtil" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="GenericsUtil" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getClass" return="java.lang.Class<T>" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="t" type="T"/> |
| <doc> |
| <![CDATA[Returns the Class object (of type <code>Class<T></code>) of the |
| argument of type <code>T</code>. |
| @param <T> The type of the argument |
| @param t the object to get the class of |
| @return <code>Class<T></code>]]> |
| </doc> |
| </method> |
| <method name="toArray" return="T[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="c" type="java.lang.Class<T>"/> |
| <param name="list" type="java.util.List<T>"/> |
| <doc> |
| <![CDATA[Converts the given <code>List<T></code> to an array of |
| <code>T[]</code>. |
| @param c the Class object of the items in the list |
| @param list the list to convert]]> |
| </doc> |
| </method> |
| <method name="toArray" return="T[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="list" type="java.util.List<T>"/> |
| <doc> |
| <![CDATA[Converts the given <code>List<T></code> to an array of |
| <code>T[]</code>. |
| @param list the list to convert |
| @throws ArrayIndexOutOfBoundsException if the list is empty. |
| Use {@link #toArray(Class, List)} if the list may be empty.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Contains utility methods for dealing with Java Generics.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.GenericsUtil --> |
| <!-- start class org.apache.hadoop.util.HostsFileReader --> |
| <class name="HostsFileReader" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="HostsFileReader" type="java.lang.String, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </constructor> |
| <method name="refresh" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getHosts" return="java.util.Set<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <method name="getExcludedHosts" return="java.util.Set<java.lang.String>" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| </class> |
| <!-- end class org.apache.hadoop.util.HostsFileReader --> |
| <!-- start interface org.apache.hadoop.util.IndexedSortable --> |
| <interface name="IndexedSortable" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="compare" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <param name="j" type="int"/> |
| <doc> |
| <![CDATA[Compare items at the given addresses consistent with the semantics of |
| {@link java.util.Comparator#compare}.]]> |
| </doc> |
| </method> |
| <method name="swap" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="i" type="int"/> |
| <param name="j" type="int"/> |
| <doc> |
| <![CDATA[Swap items at the given addresses.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Interface for collections capable of being sorted by {@link IndexedSorter} |
| algorithms.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.util.IndexedSortable --> |
| <!-- start interface org.apache.hadoop.util.IndexedSorter --> |
| <interface name="IndexedSorter" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="sort" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="s" type="org.apache.hadoop.util.IndexedSortable"/> |
| <param name="l" type="int"/> |
| <param name="r" type="int"/> |
| <doc> |
| <![CDATA[Sort the items accessed through the given IndexedSortable over the given |
| range of logical indices. From the perspective of the sort algorithm, |
| each index between l (inclusive) and r (exclusive) is an addressable |
| entry. |
| @see IndexedSortable#compare |
| @see IndexedSortable#swap]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Interface for sort algorithms accepting {@link IndexedSortable} items. |
| |
| A sort algorithm implementing this interface may only |
| {@link IndexedSortable#compare} and {@link IndexedSortable#swap} items |
| for a range of indices to effect a sort across that range.]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.util.IndexedSorter --> |
| <!-- start class org.apache.hadoop.util.MergeSort --> |
| <class name="MergeSort" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="MergeSort" type="java.util.Comparator<org.apache.hadoop.io.IntWritable>" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="mergeSort" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="src" type="int[]"/> |
| <param name="dest" type="int[]"/> |
| <param name="low" type="int"/> |
| <param name="high" type="int"/> |
| </method> |
| <doc> |
| <![CDATA[An implementation of the core algorithm of MergeSort.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.MergeSort --> |
| <!-- start class org.apache.hadoop.util.NativeCodeLoader --> |
| <class name="NativeCodeLoader" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="NativeCodeLoader" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="isNativeCodeLoaded" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Check if native-hadoop code is loaded for this platform. |
| |
| @return <code>true</code> if native-hadoop is loaded, |
| else <code>false</code>]]> |
| </doc> |
| </method> |
| <method name="getLoadNativeLibraries" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Return if native hadoop libraries, if present, can be used for this job. |
| @param jobConf job configuration |
| |
| @return <code>true</code> if native hadoop libraries, if present, can be |
| used for this job; <code>false</code> otherwise.]]> |
| </doc> |
| </method> |
| <method name="setLoadNativeLibraries" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jobConf" type="org.apache.hadoop.mapred.JobConf"/> |
| <param name="loadNativeLibraries" type="boolean"/> |
| <doc> |
| <![CDATA[Set if native hadoop libraries, if present, can be used for this job. |
| |
| @param jobConf job configuration |
| @param loadNativeLibraries can native hadoop libraries be loaded]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A helper to load the native hadoop code i.e. libhadoop.so. |
| This handles the fallback to either the bundled libhadoop-Linux-i386-32.so |
| or the default java implementations where appropriate.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.NativeCodeLoader --> |
| <!-- start class org.apache.hadoop.util.PlatformName --> |
| <class name="PlatformName" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="PlatformName" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getPlatformName" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the complete platform as per the java-vm. |
| @return returns the complete platform as per the java-vm.]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| </method> |
| <doc> |
| <![CDATA[A helper class for getting build-info of the java-vm.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.PlatformName --> |
| <!-- start class org.apache.hadoop.util.PrintJarMainClass --> |
| <class name="PrintJarMainClass" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="PrintJarMainClass" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <doc> |
| <![CDATA[@param args]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A micro-application that prints the main class name out of a jar file.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.PrintJarMainClass --> |
| <!-- start class org.apache.hadoop.util.PriorityQueue --> |
| <class name="PriorityQueue" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="PriorityQueue" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="lessThan" return="boolean" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="a" type="java.lang.Object"/> |
| <param name="b" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Determines the ordering of objects in this priority queue. Subclasses |
| must define this one method.]]> |
| </doc> |
| </method> |
| <method name="initialize" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="maxSize" type="int"/> |
| <doc> |
| <![CDATA[Subclass constructors must call this.]]> |
| </doc> |
| </method> |
| <method name="put" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <param name="element" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Adds an Object to a PriorityQueue in log(size) time. |
| If one tries to add more objects than maxSize from initialize |
| a RuntimeException (ArrayIndexOutOfBoundsException) is thrown.]]> |
| </doc> |
| </method> |
| <method name="insert" return="boolean" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="element" type="java.lang.Object"/> |
| <doc> |
| <![CDATA[Adds element to the PriorityQueue in log(size) time if either |
| the PriorityQueue is not full, or not lessThan(element, top()). |
| @param element |
| @return true if element is added, false otherwise.]]> |
| </doc> |
| </method> |
| <method name="top" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the least element of the PriorityQueue in constant time.]]> |
| </doc> |
| </method> |
| <method name="pop" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Removes and returns the least element of the PriorityQueue in log(size) |
| time.]]> |
| </doc> |
| </method> |
| <method name="adjustTop" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Should be called when the Object at top changes values. Still log(n) |
| worst case, but it's at least twice as fast to <pre> |
| { pq.top().change(); pq.adjustTop(); } |
| </pre> instead of <pre> |
| { o = pq.pop(); o.change(); pq.push(o); } |
| </pre>]]> |
| </doc> |
| </method> |
| <method name="size" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the number of elements currently stored in the PriorityQueue.]]> |
| </doc> |
| </method> |
| <method name="clear" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Removes all entries from the PriorityQueue.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A PriorityQueue maintains a partial ordering of its elements such that the |
| least element can always be found in constant time. Put()'s and pop()'s |
| require log(size) time.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.PriorityQueue --> |
| <!-- start class org.apache.hadoop.util.ProgramDriver --> |
| <class name="ProgramDriver" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ProgramDriver" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="addClass" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="name" type="java.lang.String"/> |
| <param name="mainClass" type="java.lang.Class"/> |
| <param name="description" type="java.lang.String"/> |
| <exception name="Throwable" type="java.lang.Throwable"/> |
| <doc> |
| <![CDATA[This is the method that adds the classes to the repository |
| @param name The name of the string you want the class instance to be called with |
| @param mainClass The class that you want to add to the repository |
| @param description The description of the class |
| @throws NoSuchMethodException |
| @throws SecurityException]]> |
| </doc> |
| </method> |
| <method name="driver" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Throwable" type="java.lang.Throwable"/> |
| <doc> |
| <![CDATA[This is a driver for the example programs. |
| It looks at the first command line argument and tries to find an |
| example program with that name. |
| If it is found, it calls the main method in that class with the rest |
| of the command line arguments. |
| @param args The argument from the user. args[0] is the command to run. |
| @throws NoSuchMethodException |
| @throws SecurityException |
| @throws IllegalAccessException |
| @throws IllegalArgumentException |
| @throws Throwable Anything thrown by the example program's main]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A driver that is used to run programs added to it.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.ProgramDriver --> |
| <!-- start class org.apache.hadoop.util.Progress --> |
| <class name="Progress" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Progress" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Creates a new root node.]]> |
| </doc> |
| </constructor> |
| <method name="addPhase" return="org.apache.hadoop.util.Progress" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="status" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Adds a named node to the tree.]]> |
| </doc> |
| </method> |
| <method name="addPhase" return="org.apache.hadoop.util.Progress" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Adds a node to the tree.]]> |
| </doc> |
| </method> |
| <method name="startNextPhase" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Called during execution to move to the next phase at this level in the |
| tree.]]> |
| </doc> |
| </method> |
| <method name="phase" return="org.apache.hadoop.util.Progress" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the current sub-node executing.]]> |
| </doc> |
| </method> |
| <method name="complete" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Completes this node, moving the parent node to its next child.]]> |
| </doc> |
| </method> |
| <method name="set" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="progress" type="float"/> |
| <doc> |
| <![CDATA[Called during execution on a leaf node to set its progress.]]> |
| </doc> |
| </method> |
| <method name="get" return="float" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Returns the overall progress of the root.]]> |
| </doc> |
| </method> |
| <method name="setStatus" |
| abstract="false" native="false" synchronized="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="status" type="java.lang.String"/> |
| </method> |
| <method name="toString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[Utility to assist with generation of progress reports. Applications build |
| a hierarchy of {@link Progress} instances, each modelling a phase of |
| execution. The root is constructed with {@link #Progress()}. Nodes for |
| sub-phases are created by calling {@link #addPhase()}.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.Progress --> |
| <!-- start interface org.apache.hadoop.util.Progressable --> |
| <interface name="Progressable" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <method name="progress" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Report progress to the Hadoop framework.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A facility for reporting progress. |
| |
| <p>Clients and/or applications can use the provided <code>Progressable</code> |
| to explicitly report progress to the Hadoop framework. This is especially |
| important for operations which take a significant amount of time since, |
| in lieu of the reported progress, the framework has to assume that an error |
| has occurred and time-out the operation.</p>]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.util.Progressable --> |
| <!-- start class org.apache.hadoop.util.QuickSort --> |
| <class name="QuickSort" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.util.IndexedSorter"/> |
| <constructor name="QuickSort" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="sort" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="s" type="org.apache.hadoop.util.IndexedSortable"/> |
| <param name="p" type="int"/> |
| <param name="r" type="int"/> |
| </method> |
| <method name="sort" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="s" type="org.apache.hadoop.util.IndexedSortable"/> |
| <param name="p" type="int"/> |
| <param name="r" type="int"/> |
| <param name="rep" type="org.apache.hadoop.util.Progressable"/> |
| <doc> |
| <![CDATA[Same as {@link #sort}, but indicate that we're making progress after |
| each partition.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[An implementation of the core algorithm of QuickSort. |
| See "Median-of-Three Partitioning" in Sedgewick book.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.QuickSort --> |
| <!-- start class org.apache.hadoop.util.ReflectionUtils --> |
| <class name="ReflectionUtils" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ReflectionUtils" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="setConf" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theObject" type="java.lang.Object"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Check and set 'configuration' if necessary. |
| |
| @param theObject object for which to set configuration |
| @param conf Configuration]]> |
| </doc> |
| </method> |
| <method name="newInstance" return="java.lang.Object" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="theClass" type="java.lang.Class<?>"/> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <doc> |
| <![CDATA[Create an object for the given class and initialize it from conf |
| |
| @param theClass class of which an object is created |
| @param conf Configuration |
| @return a new object]]> |
| </doc> |
| </method> |
| <method name="setContentionTracing" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="val" type="boolean"/> |
| </method> |
| <method name="printThreadInfo" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="stream" type="java.io.PrintWriter"/> |
| <param name="title" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Print all of the thread's information and stack traces. |
| |
| @param stream the stream to write the thread information to |
| @param title a string title for the stack trace]]> |
| </doc> |
| </method> |
| <method name="logThreadInfo" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="log" type="org.apache.commons.logging.Log"/> |
| <param name="title" type="java.lang.String"/> |
| <param name="minInterval" type="long"/> |
| <doc> |
| <![CDATA[Log the current thread stacks at INFO level. |
| @param log the logger that logs the stack trace |
| @param title a descriptive title for the call stacks |
| @param minInterval the minimum time interval since the stacks were last logged]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[General reflection utils]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.ReflectionUtils --> |
| <!-- start class org.apache.hadoop.util.RunJar --> |
| <class name="RunJar" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="RunJar" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="unJar" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="jarFile" type="java.io.File"/> |
| <param name="toDir" type="java.io.File"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Unpack a jar file into a directory.]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Throwable" type="java.lang.Throwable"/> |
| <doc> |
| <![CDATA[Run a Hadoop job jar. If the main class is not in the jar's manifest, |
| then it must be provided on the command line.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[Run a Hadoop job jar.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.RunJar --> |
| <!-- start class org.apache.hadoop.util.ServletUtil --> |
| <class name="ServletUtil" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ServletUtil" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="initHTML" return="java.io.PrintWriter" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="response" type="javax.servlet.ServletResponse"/> |
| <param name="title" type="java.lang.String"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Initial HTML header]]> |
| </doc> |
| </method> |
| <method name="getParameter" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="request" type="javax.servlet.ServletRequest"/> |
| <param name="name" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Get a parameter from a ServletRequest. |
| Return null if the parameter contains only white spaces.]]> |
| </doc> |
| </method> |
| <method name="htmlFooter" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[HTML footer to be added in the jsps. |
| @return the HTML footer.]]> |
| </doc> |
| </method> |
| <field name="HTML_TAIL" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| </class> |
| <!-- end class org.apache.hadoop.util.ServletUtil --> |
| <!-- start class org.apache.hadoop.util.Shell --> |
| <class name="Shell" extends="java.lang.Object" |
| abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Shell" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="Shell" type="long" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[@param interval the minimum duration to wait before re-executing the |
| command.]]> |
| </doc> |
| </constructor> |
| <method name="getGROUPS_COMMAND" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[a Unix command to get the current user's groups list]]> |
| </doc> |
| </method> |
| <method name="getGET_PERMISSION_COMMAND" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return a Unix command to get permission information.]]> |
| </doc> |
| </method> |
| <method name="getUlimitMemoryCommand" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="job" type="org.apache.hadoop.mapred.JobConf"/> |
| <doc> |
| <![CDATA[Get the Unix command for setting the maximum virtual memory available |
| to a given child process. This is only relevant when we are forking a |
| process from within the {@link org.apache.hadoop.mapred.Mapper} or the |
| {@link org.apache.hadoop.mapred.Reducer} implementations |
| e.g. <a href="{@docRoot}/org/apache/hadoop/mapred/pipes/package-summary.html">Hadoop Pipes</a> |
| or <a href="{@docRoot}/org/apache/hadoop/streaming/package-summary.html">Hadoop Streaming</a>. |
| |
| It also checks to ensure that we are running on a *nix platform else |
| (e.g. in Cygwin/Windows) it returns <code>null</code>. |
| @param job job configuration |
| @return a <code>String[]</code> with the ulimit command arguments or |
| <code>null</code> if we are running on a non *nix platform or |
| if the limit is unspecified.]]> |
| </doc> |
| </method> |
| <method name="setEnvironment" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="env" type="java.util.Map<java.lang.String, java.lang.String>"/> |
| <doc> |
| <![CDATA[set the environment for the command |
| @param env Mapping of environment variables]]> |
| </doc> |
| </method> |
| <method name="setWorkingDirectory" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="dir" type="java.io.File"/> |
| <doc> |
| <![CDATA[set the working directory |
| @param dir The directory where the command would be executed]]> |
| </doc> |
| </method> |
| <method name="run" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[check to see if a command needs to be executed and execute if needed]]> |
| </doc> |
| </method> |
| <method name="getExecString" return="java.lang.String[]" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[return an array containing the command name & its parameters]]> |
| </doc> |
| </method> |
| <method name="parseExecResult" |
| abstract="true" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="lines" type="java.io.BufferedReader"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Parse the execution result]]> |
| </doc> |
| </method> |
| <method name="getProcess" return="java.lang.Process" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[get the current sub-process executing the given command |
| @return process executing the command]]> |
| </doc> |
| </method> |
| <method name="getExitCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[get the exit code |
| @return the exit code of the process]]> |
| </doc> |
| </method> |
| <method name="execCommand" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="cmd" type="java.lang.String[]"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Static method to execute a shell command. |
| Covers most of the simple cases without requiring the user to implement |
| the <code>Shell</code> interface. |
| @param cmd shell command to execute. |
| @return the output of the executed command.]]> |
| </doc> |
| </method> |
| <field name="LOG" type="org.apache.commons.logging.Log" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="USER_NAME_COMMAND" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[a Unix command to get the current user's name]]> |
| </doc> |
| </field> |
| <field name="SET_PERMISSION_COMMAND" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[a Unix command to set permission]]> |
| </doc> |
| </field> |
| <field name="SET_OWNER_COMMAND" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[a Unix command to set owner]]> |
| </doc> |
| </field> |
| <field name="SET_GROUP_COMMAND" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="WINDOWS" type="boolean" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Set to true on Windows platforms]]> |
| </doc> |
| </field> |
| <doc> |
| <![CDATA[A base class for running a Unix command. |
| |
| <code>Shell</code> can be used to run unix commands like <code>du</code> or |
| <code>df</code>. It also offers facilities to gate commands by |
| time-intervals.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.Shell --> |
| <!-- start class org.apache.hadoop.util.Shell.ExitCodeException --> |
| <class name="Shell.ExitCodeException" extends="java.io.IOException" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Shell.ExitCodeException" type="int, java.lang.String" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getExitCode" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </method> |
| <doc> |
| <![CDATA[This is an IOException with exit code added.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.Shell.ExitCodeException --> |
| <!-- start class org.apache.hadoop.util.Shell.ShellCommandExecutor --> |
| <class name="Shell.ShellCommandExecutor" extends="org.apache.hadoop.util.Shell" |
| abstract="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[]" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <constructor name="Shell.ShellCommandExecutor" type="java.lang.String[], java.io.File, java.util.Map<java.lang.String, java.lang.String>" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="execute" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <exception name="IOException" type="java.io.IOException"/> |
| <doc> |
| <![CDATA[Execute the shell command.]]> |
| </doc> |
| </method> |
| <method name="getExecString" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| </method> |
| <method name="parseExecResult" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="protected" |
| deprecated="not deprecated"> |
| <param name="lines" type="java.io.BufferedReader"/> |
| <exception name="IOException" type="java.io.IOException"/> |
| </method> |
| <method name="getOutput" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the output of the shell command.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A simple shell command executor. |
| |
<code>ShellCommandExecutor</code> should be used in cases where the output
| of the command needs no explicit parsing and where the command, working |
| directory and the environment remains unchanged. The output of the command |
| is stored as-is and is expected to be small.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.Shell.ShellCommandExecutor --> |
| <!-- start class org.apache.hadoop.util.StringUtils --> |
| <class name="StringUtils" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="StringUtils" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="stringifyException" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="e" type="java.lang.Throwable"/> |
| <doc> |
| <![CDATA[Make a string representation of the exception. |
| @param e The exception to stringify |
| @return A string with exception name and call stack.]]> |
| </doc> |
| </method> |
| <method name="simpleHostname" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="fullHostname" type="java.lang.String"/> |
| <doc> |
<![CDATA[Given a full hostname, return the word up to the first dot.
| @param fullHostname the full hostname |
| @return the hostname to the first dot]]> |
| </doc> |
| </method> |
| <method name="humanReadableInt" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="number" type="long"/> |
| <doc> |
| <![CDATA[Given an integer, return a string that is in an approximate, but human |
| readable format. |
| It uses the bases 'k', 'm', and 'g' for 1024, 1024**2, and 1024**3. |
| @param number the number to format |
| @return a human readable form of the integer]]> |
| </doc> |
| </method> |
| <method name="formatPercent" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="done" type="double"/> |
| <param name="digits" type="int"/> |
| <doc> |
| <![CDATA[Format a percentage for presentation to the user. |
| @param done the percentage to format (0.0 to 1.0) |
| @param digits the number of digits past the decimal point |
| @return a string representation of the percentage]]> |
| </doc> |
| </method> |
| <method name="arrayToString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="strs" type="java.lang.String[]"/> |
| <doc> |
| <![CDATA[Given an array of strings, return a comma-separated list of its elements. |
| @param strs Array of strings |
| @return Empty string if strs.length is 0, comma separated list of strings |
| otherwise]]> |
| </doc> |
| </method> |
| <method name="byteToHexString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="bytes" type="byte[]"/> |
| <doc> |
| <![CDATA[Given an array of bytes it will convert the bytes to a hex string |
| representation of the bytes |
| @param bytes |
| @return hex string representation of the byte array]]> |
| </doc> |
| </method> |
| <method name="hexStringToByte" return="byte[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="hex" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Given a hexstring this will return the byte array corresponding to the |
| string |
| @param hex the hex String array |
| @return a byte array that is a hex string representation of the given |
| string. The size of the byte array is therefore hex.length/2]]> |
| </doc> |
| </method> |
| <method name="uriToString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="uris" type="java.net.URI[]"/> |
| <doc> |
| <![CDATA[@param uris]]> |
| </doc> |
| </method> |
| <method name="stringToURI" return="java.net.URI[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String[]"/> |
| <doc> |
| <![CDATA[@param str]]> |
| </doc> |
| </method> |
| <method name="stringToPath" return="org.apache.hadoop.fs.Path[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String[]"/> |
| <doc> |
| <![CDATA[@param str]]> |
| </doc> |
| </method> |
| <method name="formatTimeDiff" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="finishTime" type="long"/> |
| <param name="startTime" type="long"/> |
| <doc> |
| <![CDATA[Given a finish and start time in long milliseconds, returns a |
| String in the format Xhrs, Ymins, Z sec, for the time difference between two times. |
If finish time comes before start time then negative values of X, Y and Z will be returned.
| |
| @param finishTime finish time |
| @param startTime start time]]> |
| </doc> |
| </method> |
| <method name="getFormattedTimeWithDiff" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="dateFormat" type="java.text.DateFormat"/> |
| <param name="finishTime" type="long"/> |
| <param name="startTime" type="long"/> |
| <doc> |
| <![CDATA[Formats time in ms and appends difference (finishTime - startTime) |
| as returned by formatTimeDiff(). |
| If finish time is 0, empty string is returned, if start time is 0 |
| then difference is not appended to return value. |
| @param dateFormat date format to use |
@param finishTime finish time
| @param startTime start time |
| @return formatted value.]]> |
| </doc> |
| </method> |
| <method name="getStrings" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String"/> |
| <doc> |
<![CDATA[Returns an array of strings
@param str the comma separated string values
@return the array of the comma separated string values]]>
| </doc> |
| </method> |
| <method name="split" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Split a string using the default separator |
| @param str a string that may have escaped separator |
| @return an array of strings]]> |
| </doc> |
| </method> |
| <method name="split" return="java.lang.String[]" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String"/> |
| <param name="escapeChar" type="char"/> |
| <param name="separator" type="char"/> |
| <doc> |
| <![CDATA[Split a string using the given separator |
| @param str a string that may have escaped separator |
@param escapeChar a char that can be used to escape the separator
| @param separator a separator char |
| @return an array of strings]]> |
| </doc> |
| </method> |
| <method name="escapeString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Escape commas in the string using the default escape char |
| @param str a string |
| @return an escaped string]]> |
| </doc> |
| </method> |
| <method name="escapeString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String"/> |
| <param name="escapeChar" type="char"/> |
| <param name="charToEscape" type="char"/> |
| <doc> |
| <![CDATA[Escape <code>charToEscape</code> in the string |
| with the escape char <code>escapeChar</code> |
| |
| @param str string |
| @param escapeChar escape char |
| @param charToEscape the char to be escaped |
| @return an escaped string]]> |
| </doc> |
| </method> |
| <method name="unEscapeString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String"/> |
| <doc> |
| <![CDATA[Unescape commas in the string using the default escape char |
| @param str a string |
| @return an unescaped string]]> |
| </doc> |
| </method> |
| <method name="unEscapeString" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="str" type="java.lang.String"/> |
| <param name="escapeChar" type="char"/> |
| <param name="charToEscape" type="char"/> |
| <doc> |
| <![CDATA[Unescape <code>charToEscape</code> in the string |
| with the escape char <code>escapeChar</code> |
| |
| @param str string |
| @param escapeChar escape char |
| @param charToEscape the escaped char |
| @return an unescaped string]]> |
| </doc> |
| </method> |
| <method name="getHostname" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Return hostname without throwing exception. |
| @return hostname]]> |
| </doc> |
| </method> |
| <method name="startupShutdownMessage" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="clazz" type="java.lang.Class"/> |
| <param name="args" type="java.lang.String[]"/> |
| <param name="LOG" type="org.apache.commons.logging.Log"/> |
| <doc> |
| <![CDATA[Print a log message for starting up and shutting down |
| @param clazz the class of the server |
| @param args arguments |
| @param LOG the target log object]]> |
| </doc> |
| </method> |
| <field name="COMMA" type="char" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="COMMA_STR" type="java.lang.String" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <field name="ESCAPE_CHAR" type="char" |
| transient="false" volatile="false" |
| static="true" final="true" visibility="public" |
| deprecated="not deprecated"> |
| </field> |
| <doc> |
| <![CDATA[General string utils]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.StringUtils --> |
| <!-- start interface org.apache.hadoop.util.Tool --> |
| <interface name="Tool" abstract="true" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <implements name="org.apache.hadoop.conf.Configurable"/> |
| <method name="run" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[Execute the command with the given arguments. |
| |
| @param args command specific arguments. |
| @return exit code. |
| @throws Exception]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A tool interface that supports handling of generic command-line options. |
| |
| <p><code>Tool</code>, is the standard for any Map-Reduce tool/application. |
| The tool/application should delegate the handling of |
| <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions"> |
| standard command-line options</a> to {@link ToolRunner#run(Tool, String[])} |
| and only handle its custom arguments.</p> |
| |
| <p>Here is how a typical <code>Tool</code> is implemented:</p> |
| <p><blockquote><pre> |
| public class MyApp extends Configured implements Tool { |
| |
| public int run(String[] args) throws Exception { |
| // <code>Configuration</code> processed by <code>ToolRunner</code> |
| Configuration conf = getConf(); |
| |
| // Create a JobConf using the processed <code>conf</code> |
| JobConf job = new JobConf(conf, MyApp.class); |
| |
| // Process custom command-line options |
| Path in = new Path(args[1]); |
| Path out = new Path(args[2]); |
| |
| // Specify various job-specific parameters |
| job.setJobName("my-app"); |
| job.setInputPath(in); |
| job.setOutputPath(out); |
| job.setMapperClass(MyApp.MyMapper.class); |
| job.setReducerClass(MyApp.MyReducer.class); |
| |
| // Submit the job, then poll for progress until the job is complete |
| JobClient.runJob(job); |
| } |
| |
| public static void main(String[] args) throws Exception { |
| // Let <code>ToolRunner</code> handle generic command-line options |
| int res = ToolRunner.run(new Configuration(), new Sort(), args); |
| |
| System.exit(res); |
| } |
| } |
| </pre></blockquote></p> |
| |
| @see GenericOptionsParser |
| @see ToolRunner]]> |
| </doc> |
| </interface> |
| <!-- end interface org.apache.hadoop.util.Tool --> |
| <!-- start class org.apache.hadoop.util.ToolRunner --> |
| <class name="ToolRunner" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="ToolRunner" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="run" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="conf" type="org.apache.hadoop.conf.Configuration"/> |
| <param name="tool" type="org.apache.hadoop.util.Tool"/> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[Runs the given <code>Tool</code> by {@link Tool#run(String[])}, after |
| parsing with the given generic arguments. Uses the given |
| <code>Configuration</code>, or builds one if null. |
| |
| Sets the <code>Tool</code>'s configuration with the possibly modified |
| version of the <code>conf</code>. |
| |
| @param conf <code>Configuration</code> for the <code>Tool</code>. |
| @param tool <code>Tool</code> to run. |
| @param args command-line arguments to the tool. |
| @return exit code of the {@link Tool#run(String[])} method.]]> |
| </doc> |
| </method> |
| <method name="run" return="int" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="tool" type="org.apache.hadoop.util.Tool"/> |
| <param name="args" type="java.lang.String[]"/> |
| <exception name="Exception" type="java.lang.Exception"/> |
| <doc> |
| <![CDATA[Runs the <code>Tool</code> with its <code>Configuration</code>. |
| |
| Equivalent to <code>run(tool.getConf(), tool, args)</code>. |
| |
| @param tool <code>Tool</code> to run. |
| @param args command-line arguments to the tool. |
| @return exit code of the {@link Tool#run(String[])} method.]]> |
| </doc> |
| </method> |
| <method name="printGenericCommandUsage" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="out" type="java.io.PrintStream"/> |
| <doc> |
<![CDATA[Prints generic command-line arguments and usage information.
| |
| @param out stream to write usage information to.]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[A utility to help run {@link Tool}s. |
| |
| <p><code>ToolRunner</code> can be used to run classes implementing |
| <code>Tool</code> interface. It works in conjunction with |
| {@link GenericOptionsParser} to parse the |
| <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions"> |
| generic hadoop command line arguments</a> and modifies the |
| <code>Configuration</code> of the <code>Tool</code>. The |
| application-specific options are passed along without being modified. |
| </p> |
| |
| @see Tool |
| @see GenericOptionsParser]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.ToolRunner --> |
| <!-- start class org.apache.hadoop.util.VersionInfo --> |
| <class name="VersionInfo" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="VersionInfo" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="getVersion" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the Hadoop version. |
| @return the Hadoop version string, eg. "0.6.3-dev"]]> |
| </doc> |
| </method> |
| <method name="getRevision" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the subversion revision number for the root directory |
| @return the revision number, eg. "451451"]]> |
| </doc> |
| </method> |
| <method name="getDate" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The date that Hadoop was compiled. |
| @return the compilation date in unix date format]]> |
| </doc> |
| </method> |
| <method name="getUser" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[The user that compiled Hadoop. |
| @return the username of the user]]> |
| </doc> |
| </method> |
| <method name="getUrl" return="java.lang.String" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <doc> |
| <![CDATA[Get the subversion URL for the root Hadoop directory.]]> |
| </doc> |
| </method> |
| <method name="main" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="args" type="java.lang.String[]"/> |
| </method> |
| <doc> |
| <![CDATA[This class finds the package info for Hadoop and the HadoopVersionAnnotation |
| information.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.VersionInfo --> |
| <!-- start class org.apache.hadoop.util.XMLUtils --> |
| <class name="XMLUtils" extends="java.lang.Object" |
| abstract="false" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <constructor name="XMLUtils" |
| static="false" final="false" visibility="public" |
| deprecated="not deprecated"> |
| </constructor> |
| <method name="transform" |
| abstract="false" native="false" synchronized="false" |
| static="true" final="false" visibility="public" |
| deprecated="not deprecated"> |
| <param name="styleSheet" type="java.io.InputStream"/> |
| <param name="xml" type="java.io.InputStream"/> |
| <param name="out" type="java.io.Writer"/> |
| <exception name="TransformerConfigurationException" type="javax.xml.transform.TransformerConfigurationException"/> |
| <exception name="TransformerException" type="javax.xml.transform.TransformerException"/> |
| <doc> |
| <![CDATA[Transform input xml given a stylesheet. |
| |
| @param styleSheet the style-sheet |
| @param xml input xml data |
| @param out output |
| @throws TransformerConfigurationException |
| @throws TransformerException]]> |
| </doc> |
| </method> |
| <doc> |
| <![CDATA[General xml utilities.]]> |
| </doc> |
| </class> |
| <!-- end class org.apache.hadoop.util.XMLUtils --> |
| <doc> |
| <![CDATA[Common utilities.]]> |
| </doc> |
| </package> |
| |
| </api> |