Merge pull request #870 from igiguere/NUTCH-2971
Fix for NUTCH-2971: Unit tests fail with JDK 17
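
Unit tests fail under JDK 17 because of the retired Apache MRUnit test
library. This change removes the MRUnit dependency and the MRUnit-based
CrawlDbUpdateTestDriver, adds a test-local Reducer.Context implementation
(ReducerContextWrapper) used by TestCrawlDbStates and TestIndexerMapReduce,
binds the test proxy in TestProtocolHttpByProxy to a dynamically allocated
free port, and adds 'dependencytests' Ant targets to print the unit test
dependency tree.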
diff --git a/build.xml b/build.xml
index c4b0b0d..278c19e 100644
--- a/build.xml
+++ b/build.xml
@@ -86,6 +86,10 @@
<ivy:dependencytree />
</target>
+ <target name="dependencytests" depends="resolve-test" description="Show unit tests dependency tree">
+ <ivy:dependencytree />
+ </target>
+
<!-- ====================================================== -->
<!-- Stuff needed by all targets -->
<!-- ====================================================== -->
diff --git a/ivy/ivy.xml b/ivy/ivy.xml
index a68b589..f149ce1 100644
--- a/ivy/ivy.xml
+++ b/ivy/ivy.xml
@@ -122,13 +122,6 @@
<!-- Required for JUnit 5 (Jupiter) test execution -->
<dependency org="org.junit.jupiter" name="junit-jupiter-engine" rev="5.13.4" conf="test->default"/>
<dependency org="org.junit.jupiter" name="junit-jupiter-api" rev="5.13.4" conf="test->default"/>
- <dependency org="org.apache.mrunit" name="mrunit" rev="1.1.0" conf="test->default">
- <artifact name="mrunit" ns0:classifier="hadoop2" />
- <exclude org="log4j" module="log4j" />
- <exclude org="junit" module="junit" />
- <exclude org="org.powermock" module="powermock-module-junit4" />
- <exclude org="com.google.guava" name="guava" />
- </dependency>
<!-- Jetty used to serve test pages for unit tests, but is also provided as dependency of Hadoop -->
<dependency org="org.eclipse.jetty" name="jetty-server" rev="10.0.25" conf="test->default">
diff --git a/src/plugin/build-plugin.xml b/src/plugin/build-plugin.xml
index a4f737e..b0aca71 100755
--- a/src/plugin/build-plugin.xml
+++ b/src/plugin/build-plugin.xml
@@ -282,4 +282,7 @@
<target name="dependencytree" depends="resolve-default" description="Show dependency tree">
<ivy:dependencytree />
</target>
+ <target name="dependencytests" depends="resolve-test" description="Show unit tests dependency tree">
+ <ivy:dependencytree />
+ </target>
</project>
diff --git a/src/plugin/protocol-http/src/test/org/apache/nutch/protocol/http/TestProtocolHttpByProxy.java b/src/plugin/protocol-http/src/test/org/apache/nutch/protocol/http/TestProtocolHttpByProxy.java
index 6a694d1..8a6b08f 100644
--- a/src/plugin/protocol-http/src/test/org/apache/nutch/protocol/http/TestProtocolHttpByProxy.java
+++ b/src/plugin/protocol-http/src/test/org/apache/nutch/protocol/http/TestProtocolHttpByProxy.java
@@ -22,6 +22,7 @@
import org.apache.nutch.protocol.Content;
import org.apache.nutch.protocol.ProtocolOutput;
import org.apache.nutch.protocol.ProtocolStatus;
+import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.littleshoot.proxy.HttpProxyServer;
@@ -30,26 +31,43 @@
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
+import java.io.IOException;
+import java.net.ServerSocket;
+
/**
* Test cases for protocol-http by proxy
*/
public class TestProtocolHttpByProxy extends AbstractHttpProtocolPluginTest {
public static final String PROXY_HOST = "localhost";
- public static final Integer PROXY_PORT = 8888;
+ public Integer proxyPort = 8888;
public static final String TARGET_HOST = "www.baidu.com";
public static final Integer TARGET_PORT = 443;
+
+ private HttpProxyServer server;
@BeforeEach
public void setUp() throws Exception {
super.setUp();
+ proxyPort = findOpenPort();
conf.set("http.proxy.host", PROXY_HOST);
- conf.set("http.proxy.port", PROXY_PORT.toString());
+ conf.set("http.proxy.port", proxyPort.toString());
http.setConf(conf);
- HttpProxyServer server = DefaultHttpProxyServer.bootstrap()
- .withPort(PROXY_PORT).start();
+ server = DefaultHttpProxyServer.bootstrap()
+ .withPort(proxyPort).start();
+ }
+
+ private Integer findOpenPort() throws IOException {
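+ // Binding a ServerSocket to port 0 lets the OS pick a free ephemeral port,
+ // so the test proxy does not collide with a fixed, possibly occupied port.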
+ try (ServerSocket socket = new ServerSocket(0)) {
+ return socket.getLocalPort();
+ }
+ }
+
+ @AfterEach
+ public void tearDown() {
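+ // Stop the proxy so each test run can bind a fresh port.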
+ server.stop();
}
@Override
diff --git a/src/test/org/apache/nutch/crawl/CrawlDbUpdateTestDriver.java b/src/test/org/apache/nutch/crawl/CrawlDbUpdateTestDriver.java
deleted file mode 100644
index 544d622..0000000
--- a/src/test/org/apache/nutch/crawl/CrawlDbUpdateTestDriver.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.nutch.crawl;
-
-import java.lang.invoke.MethodHandles;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.util.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.mrunit.mapreduce.ReduceDriver;
-import org.apache.hadoop.mrunit.types.Pair;
-
-/**
- * Utility to test transitions of {@link CrawlDatum} states during an update of
- * {@link CrawlDb} (command {@literal updatedb}): call
- * {@link CrawlDbReducer#reduce(Text, Iterator, OutputCollector, Reporter)}
- * (using MRUnit) with the old CrawlDatum (db status) and the new one (fetch
- * status)
- */
-public class CrawlDbUpdateTestDriver<T extends Reducer<Text, CrawlDatum, Text, CrawlDatum>> {
-
- private static final Logger LOG = LoggerFactory
- .getLogger(MethodHandles.lookup().lookupClass());
-
- private ReduceDriver<Text, CrawlDatum, Text, CrawlDatum> reduceDriver;
- private T reducer;
- private Configuration configuration;
-
- public static Text dummyURL = new Text("http://nutch.apache.org/");
-
-// protected CrawlDbUpdateUtilNewAPI(T red, T.Context con) {
- protected CrawlDbUpdateTestDriver(T updateReducer, Configuration conf) {
- reducer = updateReducer;
- configuration = conf;
- }
-
- /**
- * run
- * {@link CrawlDbReducer#reduce(Text, Iterator, OutputCollector, Reporter)}
- * and return the CrawlDatum(s) which would have been written into CrawlDb
- *
- * @param values
- * list of input CrawlDatums
- * @return list of resulting CrawlDatum(s) in CrawlDb
- */
- public List<CrawlDatum> update(List<CrawlDatum> values) {
- List<CrawlDatum> result = new ArrayList<CrawlDatum>(0);
- if (values == null || values.size() == 0) {
- return result;
- }
- Collections.shuffle(values); // sorting of values should have no influence
- reduceDriver = ReduceDriver.newReduceDriver(reducer);
- reduceDriver.getConfiguration().addResource(configuration);
- reduceDriver.withInput(dummyURL, values);
- List<Pair<Text,CrawlDatum>> reduceResult;
- try {
- reduceResult = reduceDriver.run();
- for (Pair<Text,CrawlDatum> p : reduceResult) {
- if (p.getFirst().equals(dummyURL)) {
- result.add(p.getSecond());
- }
- }
- } catch (IOException e) {
- LOG.error(StringUtils.stringifyException(e));
- return result;
- }
- return result;
- }
-
- /**
- * run
- * {@link CrawlDbReducer#reduce(Text, Iterator, OutputCollector, Reporter)}
- * and return the CrawlDatum(s) which would have been written into CrawlDb
- *
- * @param dbDatum
- * previous CrawlDatum in CrawlDb
- * @param fetchDatum
- * CrawlDatum resulting from fetching
- * @return list of resulting CrawlDatum(s) in CrawlDb
- */
- public List<CrawlDatum> update(CrawlDatum dbDatum, CrawlDatum fetchDatum) {
- List<CrawlDatum> values = new ArrayList<CrawlDatum>();
- if (dbDatum != null)
- values.add(dbDatum);
- if (fetchDatum != null)
- values.add(fetchDatum);
- return update(values);
- }
-
- /**
- * see {@link #update(List)}
- */
- public List<CrawlDatum> update(CrawlDatum... values) {
- return update(Arrays.asList(values));
- }
-
-}
diff --git a/src/test/org/apache/nutch/crawl/TestCrawlDbStates.java b/src/test/org/apache/nutch/crawl/TestCrawlDbStates.java
index bae7b29..3474381 100644
--- a/src/test/org/apache/nutch/crawl/TestCrawlDbStates.java
+++ b/src/test/org/apache/nutch/crawl/TestCrawlDbStates.java
@@ -23,6 +23,7 @@
import org.apache.hadoop.util.StringUtils;
import org.apache.nutch.scoring.ScoringFilterException;
import org.apache.nutch.scoring.ScoringFilters;
+import org.apache.nutch.util.ReducerContextWrapper;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -31,8 +32,10 @@
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Date;
+import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import static org.apache.nutch.crawl.CrawlDatum.*;
import static org.junit.jupiter.api.Assertions.fail;
@@ -196,13 +199,13 @@
* already in CrawlDb. Newly injected elements have status "db_unfetched".
* Inject is simulated by calling {@link Injector.InjectReducer#reduce()}.
*/
+ @SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void testCrawlDbStatTransitionInject() {
LOG.info("Test CrawlDatum states in Injector after inject");
Configuration conf = CrawlDBTestUtil.createContext().getConfiguration();
Injector.InjectReducer injector = new Injector.InjectReducer();
- CrawlDbUpdateTestDriver<Injector.InjectReducer> injectDriver =
- new CrawlDbUpdateTestDriver<Injector.InjectReducer>(injector, conf);
+
ScoringFilters scfilters = new ScoringFilters(conf);
for (String sched : schedules) {
LOG.info("Testing inject with {}", sched);
@@ -234,12 +237,29 @@
LOG.error(StringUtils.stringifyException(e));
}
values.add(injected);
- List<CrawlDatum> res = injectDriver.update(values);
- if (res.size() != 1) {
+
+ List<CrawlDatum> result = new ArrayList<CrawlDatum>();
+ Map<Text, CrawlDatum> res = new HashMap<>();
+ ReducerContextWrapper contextWrapper = new ReducerContextWrapper(injector, conf, res);
+ try {
+ injector.setup(contextWrapper.getContext());
+ // exercise the reducer under test
+ injector.reduce(CrawlDbUpdateUtil.dummyURL, values, contextWrapper.getContext());
+
+ for (Map.Entry<Text, CrawlDatum> e : res.entrySet()) {
+ if (e.getKey().equals(CrawlDbUpdateUtil.dummyURL)) {
+ result.add(e.getValue());
+ }
+ }
+ } catch (IOException | InterruptedException e) {
+ LOG.error(StringUtils.stringifyException(e));
+ }
+
+ if (result.size() != 1) {
fail("Inject didn't result in one single CrawlDatum per URL");
continue;
}
- byte status = res.get(0).getStatus();
+ byte status = result.get(0).getStatus();
if (status != toDbStatus) {
fail("Inject for "
+ (fromDbStatus == -1 ? "" : getStatusName(fromDbStatus)
diff --git a/src/test/org/apache/nutch/indexer/TestIndexerMapReduce.java b/src/test/org/apache/nutch/indexer/TestIndexerMapReduce.java
index c275b19..81dd192 100644
--- a/src/test/org/apache/nutch/indexer/TestIndexerMapReduce.java
+++ b/src/test/org/apache/nutch/indexer/TestIndexerMapReduce.java
@@ -19,9 +19,6 @@
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.mrunit.mapreduce.ReduceDriver;
-import org.apache.hadoop.mrunit.types.Pair;
import org.apache.hadoop.util.StringUtils;
import org.apache.nutch.crawl.CrawlDatum;
import org.apache.nutch.crawl.NutchWritable;
@@ -33,6 +30,7 @@
import org.apache.nutch.parse.ParseText;
import org.apache.nutch.protocol.Content;
import org.apache.nutch.util.NutchConfiguration;
+import org.apache.nutch.util.ReducerContextWrapper;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -42,7 +40,9 @@
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -89,8 +89,8 @@
public static CrawlDatum crawlDatumFetchSuccess = new CrawlDatum(
CrawlDatum.STATUS_FETCH_SUCCESS, 60 * 60 * 24);
- private Reducer<Text, NutchWritable, Text, NutchIndexAction> reducer = new IndexerMapReduce.IndexerReducer();
- private ReduceDriver<Text, NutchWritable, Text, NutchIndexAction> reduceDriver;
+ private IndexerMapReduce.IndexerReducer reducer = new IndexerMapReduce.IndexerReducer();
+
private Configuration configuration;
@@ -101,6 +101,9 @@
public void testBinaryContentBase64() {
configuration = NutchConfiguration.create();
configuration.setBoolean(IndexerMapReduce.INDEXER_BINARY_AS_BASE64, true);
+
+ // Work around an unrelated issue with "index.jexl.filter": do not load all plugins (see src/test/nutch-site.xml).
+ configuration.set("plugin.includes", "protocol-http|urlfilter-regex|parse-(html|tika)|index-(basic|anchor)|indexer-csv|scoring-opic|urlnormalizer-(pass|regex|basic)");
Charset[] testCharsets = { StandardCharsets.UTF_8,
Charset.forName("iso-8859-1"), Charset.forName("iso-8859-2") };
@@ -155,7 +158,10 @@
* @param content
* (optional, if index binary content) protocol content
* @return "indexed" document
*/
+ @SuppressWarnings({ "unchecked", "rawtypes" })
public NutchDocument runIndexer(CrawlDatum dbDatum, CrawlDatum fetchDatum,
ParseText parseText, ParseData parseData, Content content) {
List<NutchWritable> values = new ArrayList<NutchWritable>();
@@ -164,19 +170,20 @@
values.add(new NutchWritable(parseText));
values.add(new NutchWritable(parseData));
values.add(new NutchWritable(content));
- reduceDriver = ReduceDriver.newReduceDriver(reducer);
- reduceDriver.getConfiguration().addResource(configuration);
- reduceDriver.withInput(testUrlText, values);
- List<Pair<Text, NutchIndexAction>> reduceResult;
+ Map<Text, NutchIndexAction> reduceResult = new HashMap<>();
+ ReducerContextWrapper contextWrapper = new ReducerContextWrapper(reducer, configuration, reduceResult);
NutchDocument doc = null;
- try {
- reduceResult = reduceDriver.run();
- for (Pair<Text, NutchIndexAction> p : reduceResult) {
- if (p.getSecond().action != NutchIndexAction.DELETE) {
- doc = p.getSecond().doc;
+ try {
+ reducer.setup(contextWrapper.getContext());
+ // exercise the reducer under test
+ reducer.reduce(testUrlText, values, contextWrapper.getContext());
+
+ for (Map.Entry<Text, NutchIndexAction> e : reduceResult.entrySet()) {
+ if (e.getValue().action != NutchIndexAction.DELETE) {
+ doc = e.getValue().doc;
}
}
- } catch (IOException e) {
+ } catch (IOException | InterruptedException e) {
LOG.error(StringUtils.stringifyException(e));
}
return doc;
diff --git a/src/test/org/apache/nutch/util/ReducerContextWrapper.java b/src/test/org/apache/nutch/util/ReducerContextWrapper.java
new file mode 100644
index 0000000..196116c
--- /dev/null
+++ b/src/test/org/apache/nutch/util/ReducerContextWrapper.java
@@ -0,0 +1,407 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nutch.util;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configuration.IntegerRanges;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.RawComparator;
+import org.apache.hadoop.mapred.Counters;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.Partitioner;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.security.Credentials;
+
+/**
+ * Wraps an implementation of {@link Reducer.Context} for use in unit tests,
+ * for example TestIndexerMapReduce and TestCrawlDbStates#testCrawlDbStatTransitionInject.
+ *
+ * @param <KEYIN> Type of input keys
+ * @param <VALUEIN> Type of input values
+ * @param <KEYOUT> Type of output keys
+ * @param <VALUEOUT> Type of output values
+ */
+public class ReducerContextWrapper<KEYIN, VALUEIN, KEYOUT, VALUEOUT> {
+
+ private Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reducer;
+ private Configuration config;
+ private Counters counters;
+ private Map<KEYIN, VALUEIN> valuesIn;
+ private Map<KEYOUT, VALUEOUT> valuesOut;
+
+ private int valuesIndex;
+ private KEYIN currentKey;
+ private VALUEIN currentValue;
+
+ private Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context context;
+
+ private String status;
+
+ public ReducerContextWrapper() {
+ counters = new Counters();
+ valuesIn = new HashMap<>();
+ valuesIndex = 0;
+ }
+
+ /**
+ * Constructs a ReducerContextWrapper
+ *
+ * @param reducer The reducer whose inner Context class is instantiated
+ * @param config The configuration exposed through the wrapped Reducer.Context
+ * @param valuesOut The map that collects the reducer's output, standing in for Hadoop's output collector
+ */
+ public ReducerContextWrapper(Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT> reducer, Configuration config, Map<KEYOUT, VALUEOUT> valuesOut) {
+ this();
+ this.config = config;
+ this.reducer = reducer;
+ this.valuesOut = valuesOut;
+ initContext();
+ }
+
+ /**
+ * Returns the wrapped Reducer.Context, to be passed to Reducer.setup() and
+ * Reducer.reduce() in unit tests.
+ * @return the wrapped Reducer.Context
+ */
+ public Reducer<KEYIN, VALUEIN, KEYOUT, VALUEOUT>.Context getContext() {
+ return context;
+ }
+
+ private void initContext() {
+ // Most Context methods are never called by Nutch unit tests; they return default values.
+ context = reducer.new Context() {
+
+ @Override
+ public KEYIN getCurrentKey() throws IOException, InterruptedException {
+ return currentKey;
+ }
+
+ @Override
+ public VALUEIN getCurrentValue() throws IOException, InterruptedException {
+ return currentValue;
+ }
+
+ @Override
+ public boolean nextKeyValue() throws IOException, InterruptedException {
+ return valuesIndex < valuesIn.size();
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void write(Object arg0, Object arg1)
+ throws IOException, InterruptedException {
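+ // Record the output pair in the caller-supplied map, remember it as the
+ // current key/value, and advance the counter used by getProgress().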
+ valuesOut.put((KEYOUT) arg0, (VALUEOUT) arg1);
+ currentKey = (KEYIN) arg0;
+ currentValue = (VALUEIN) arg1;
+ valuesIndex++;
+ }
+
+ @Override
+ public Counter getCounter(Enum<?> arg0) {
+ return counters.findCounter(arg0);
+ }
+
+ @Override
+ public Counter getCounter(String arg0, String arg1) {
+ return counters.findCounter(arg0, arg1);
+ }
+
+ @Override
+ public float getProgress() {
+ return valuesIndex;
+ }
+
+ @Override
+ public String getStatus() {
+ return status;
+ }
+
+ @Override
+ public void setStatus(String arg0) {
+ status = arg0;
+ }
+
+ @Override
+ public Configuration getConfiguration() {
+ return config;
+ }
+
+ @Override
+ public Iterable<VALUEIN> getValues()
+ throws IOException, InterruptedException {
+ return valuesIn.values();
+ }
+
+ @Override
+ public boolean nextKey() throws IOException, InterruptedException {
+ return valuesIndex < valuesIn.size();
+ }
+
+ @Override
+ public OutputCommitter getOutputCommitter() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public TaskAttemptID getTaskAttemptID() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Path[] getArchiveClassPaths() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public String[] getArchiveTimestamps() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public URI[] getCacheArchives() throws IOException {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public URI[] getCacheFiles() throws IOException {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Class<? extends Reducer<?, ?, ?, ?>> getCombinerClass()
+ throws ClassNotFoundException {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public RawComparator<?> getCombinerKeyGroupingComparator() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Credentials getCredentials() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Path[] getFileClassPaths() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public String[] getFileTimestamps() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public RawComparator<?> getGroupingComparator() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Class<? extends InputFormat<?, ?>> getInputFormatClass()
+ throws ClassNotFoundException {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public String getJar() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public JobID getJobID() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public String getJobName() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public boolean getJobSetupCleanupNeeded() {
+ // Auto-generated
+ return false;
+ }
+
+ @Override
+ public Path[] getLocalCacheArchives() throws IOException {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Path[] getLocalCacheFiles() throws IOException {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Class<?> getMapOutputKeyClass() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Class<?> getMapOutputValueClass() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Class<? extends Mapper<?, ?, ?, ?>> getMapperClass()
+ throws ClassNotFoundException {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public int getMaxMapAttempts() {
+ // Auto-generated
+ return 0;
+ }
+
+ @Override
+ public int getMaxReduceAttempts() {
+ // Auto-generated
+ return 0;
+ }
+
+ @Override
+ public int getNumReduceTasks() {
+ // Auto-generated
+ return 0;
+ }
+
+ @Override
+ public Class<? extends OutputFormat<?, ?>> getOutputFormatClass()
+ throws ClassNotFoundException {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Class<?> getOutputKeyClass() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Class<?> getOutputValueClass() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Class<? extends Partitioner<?, ?>> getPartitionerClass()
+ throws ClassNotFoundException {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public boolean getProfileEnabled() {
+ // Auto-generated
+ return false;
+ }
+
+ @Override
+ public String getProfileParams() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public IntegerRanges getProfileTaskRange(boolean arg0) {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Class<? extends Reducer<?, ?, ?, ?>> getReducerClass()
+ throws ClassNotFoundException {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public RawComparator<?> getSortComparator() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public boolean getSymlink() {
+ // Auto-generated
+ return false;
+ }
+
+ @Override
+ public boolean getTaskCleanupNeeded() {
+ // Auto-generated
+ return false;
+ }
+
+ @Override
+ public String getUser() {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public Path getWorkingDirectory() throws IOException {
+ // Auto-generated
+ return null;
+ }
+
+ @Override
+ public void progress() {
+ // Auto-generated
+ }
+ };
+
+ }
+}
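
For reference, a minimal sketch of how the new wrapper drives a reducer in a
unit test, following the call pattern used in TestIndexerMapReduce above. The
IdentityReducer class, the Text/Text types, and the sample input are
illustrative assumptions; only the ReducerContextWrapper constructor and
getContext() come from the patch.

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.nutch.util.ReducerContextWrapper;

/** Hypothetical reducer, defined here only to illustrate the wrapper. */
class IdentityReducer extends Reducer<Text, Text, Text, Text> {
  // Widened from protected to public so it can be called directly from the test.
  @Override
  public void reduce(Text key, Iterable<Text> values, Context context)
      throws IOException, InterruptedException {
    for (Text value : values) {
      context.write(key, value); // captured by the wrapper's output map
    }
  }
}

public class ReducerContextWrapperSketch {
  public static void main(String[] args) throws Exception {
    IdentityReducer reducer = new IdentityReducer();
    // Everything the reducer writes ends up in this map.
    Map<Text, Text> out = new HashMap<>();
    ReducerContextWrapper<Text, Text, Text, Text> wrapper =
        new ReducerContextWrapper<>(reducer, new Configuration(), out);

    // Same sequence as the updated tests: inputs are passed straight to
    // reduce(), so the wrapped Context only needs to collect outputs.
    reducer.reduce(new Text("http://nutch.apache.org/"),
        List.of(new Text("value")), wrapper.getContext());

    System.out.println(out); // {http://nutch.apache.org/=value}
  }
}

The updated tests additionally call setup(contextWrapper.getContext()) before
reduce() so the reducer can read the injected Configuration; IdentityReducer
has no setup logic, so the sketch omits that call. Because outputs are
collected in a Map, a later write to the same key overwrites the earlier one,
which is sufficient for the single-URL assertions in these tests.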