/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.streaming;

import org.junit.Test;
import static org.junit.Assert.*;

import java.io.*;

import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.mapreduce.JobContext;

/**
 * This class tests hadoopStreaming in MapReduce local mode.
 * It uses the Hadoop Aggregate framework to count the number of
 * occurrences of each word in the input.
 */
public class TestStreamAggregate
{
  protected File INPUT_FILE = new File("stream_aggregate_input.txt");
  protected File OUTPUT_DIR = new File("stream_aggregate_out");
  protected String input = "roses are red\nviolets are blue\nbunnies are pink\n";
  // map parses input lines and generates count entries for each word.
  protected String map = StreamUtil.makeJavaCommand(StreamAggregate.class, new String[]{".", "\\n"});
  // Use the aggregate combiner and reducer to aggregate the counts.
  protected String outputExpect = "are\t3\nblue\t1\nbunnies\t1\npink\t1\nred\t1\nroses\t1\nviolets\t1\n";

  private StreamJob job;
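  // A sketch of the aggregate flow this test exercises. The exact key prefix is an
  // assumption based on the Hadoop Aggregate convention of naming the aggregator
  // in the emitted key (e.g. "LongValueSum:<word>\t1"):
  //   the StreamAggregate mapper emits one prefixed key per word with a count of 1,
  //   and the built-in "aggregate" reducer strips the prefix and sums the counts,
  //   producing lines such as "are\t3" and "roses\t1" as listed in outputExpect.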

  public TestStreamAggregate() throws IOException
  {
    UtilTest utilTest = new UtilTest(getClass().getName());
    utilTest.checkUserDir();
    utilTest.redirectIfAntJunit();
  }

  protected void createInput() throws IOException
  {
    DataOutputStream out = new DataOutputStream(
      new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
    out.write(input.getBytes("UTF-8"));
    out.close();
  }

  protected String[] genArgs() {
    return new String[] {
      "-input", INPUT_FILE.getAbsolutePath(),
      "-output", OUTPUT_DIR.getAbsolutePath(),
      "-mapper", map,
      "-reducer", "aggregate",
      //"-verbose",
      //"-jobconf", "stream.debug=set"
      "-jobconf", JobContext.PRESERVE_FAILED_TASK_FILES + "=true",
      "-jobconf", "stream.tmpdir=" + System.getProperty("test.build.data", "/tmp")
    };
  }
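
  // For reference, these arguments correspond roughly to invoking the streaming jar
  // directly; the jar name and paths below are illustrative, not taken from this test:
  //   hadoop jar hadoop-streaming.jar \
  //     -input stream_aggregate_input.txt -output stream_aggregate_out \
  //     -mapper <StreamAggregate java command> -reducer aggregate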

  @Test
  public void testCommandLine() throws Exception {
    try {
      try {
        FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
      } catch (Exception e) {
        // Ignore: the output directory may not exist yet.
      }
      createInput();
      boolean mayExit = false;
      // During tests, the default Configuration uses the local MapReduce runner,
      // so don't specify -config or -cluster.
      job = new StreamJob(genArgs(), mayExit);
      job.go();
      File outFile = new File(OUTPUT_DIR, "part-00000").getAbsoluteFile();
      String output = StreamUtil.slurp(outFile);
      outFile.delete();
      System.err.println("outEx1=" + outputExpect);
      System.err.println(" out1=" + output);
      assertEquals(outputExpect, output);
    } finally {
      INPUT_FILE.delete();
      FileUtil.fullyDelete(OUTPUT_DIR.getAbsoluteFile());
    }
  }

  public static void main(String[] args) throws Exception
  {
    new TestStreamAggregate().testCommandLine();
  }
}