/*
* Copyright (c) 2013 DataTorrent, Inc. ALL Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datatorrent.lib.stream;

import org.junit.Assert;
import org.junit.Test;

import com.datatorrent.lib.testbench.CountTestSink;

/**
 * Functional test for {@link com.datatorrent.lib.stream.StreamMerger}.
 * <p>
 * Benchmark note: roughly 3 million tuples/sec was observed in a debugging
 * environment; throughput on larger nodes still needs to be measured.<br>
 * <br>
 */
public class StreamMergerTest
{
  /**
   * Tests operator pass-through. The actual object passed is not relevant,
   * since the operator simply forwards tuples from both input ports to the
   * output port.
   */
  @SuppressWarnings({ "rawtypes", "unchecked" })
  @Test
  public void testNodeProcessing() throws Exception
  {
    StreamMerger oper = new StreamMerger();
    CountTestSink mergeSink = new CountTestSink();
    oper.out.setSink(mergeSink);

    oper.beginWindow(0);
    int numTuples = 500;
    Integer input = 0; // autoboxing instead of the deprecated new Integer(0)
    // The same input object can be reused because the operator is pure pass-through
    for (int i = 0; i < numTuples; i++) {
      oper.data1.process(input);
      oper.data2.process(input);
    }
    oper.endWindow();

    // Each of the two input ports received numTuples tuples
    Assert.assertEquals("number of emitted tuples", numTuples * 2, mergeSink.count);
  }
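
  /*
   * Hedged sketch (not part of the original test): checks that tuples arriving
   * on only one input port are still forwarded unchanged. It assumes only the
   * StreamMerger and CountTestSink APIs already exercised above; the tuple
   * count of 300 is arbitrary.
   */
  @SuppressWarnings({ "rawtypes", "unchecked" })
  @Test
  public void testSingleInputPassThrough() throws Exception
  {
    StreamMerger oper = new StreamMerger();
    CountTestSink mergeSink = new CountTestSink();
    oper.out.setSink(mergeSink);

    oper.beginWindow(0);
    int numTuples = 300;
    for (int i = 0; i < numTuples; i++) {
      oper.data1.process(i); // only the first input port receives data
    }
    oper.endWindow();

    // All tuples from the single active port should reach the sink
    Assert.assertEquals("number of emitted tuples", numTuples, mergeSink.count);
  }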
}