/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.orc.tools;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.TypeDescription;
import org.apache.orc.Writer;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;

import static org.junit.jupiter.api.Assertions.assertTrue;
public class TestScanData {
  private Path workDir = new Path(System.getProperty("test.tmp.dir"));
  private Configuration conf;
  private FileSystem fs;
  private Path testFilePath;

  @BeforeEach
  public void openFileSystem() throws Exception {
    conf = new Configuration();
    fs = FileSystem.getLocal(conf);
    fs.setWorkingDirectory(workDir);
    testFilePath = new Path("TestScanData.testScan.orc");
    // Remove any output left over from a previous run.
    fs.delete(testFilePath, false);
  }

  @Test
  public void testScan() throws Exception {
    // Write an ORC file with an int column and a string column.
    TypeDescription schema = TypeDescription.fromString("struct<x:int,y:string>");
    Writer writer = OrcFile.createWriter(testFilePath,
        OrcFile.writerOptions(conf)
            .setSchema(schema));
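    // Populate a vectorized row batch one row at a time.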
    VectorizedRowBatch batch = schema.createRowBatch();
    LongColumnVector x = (LongColumnVector) batch.cols[0];
    BytesColumnVector y = (BytesColumnVector) batch.cols[1];
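    // Write 10,000 rows, flushing each batch as soon as it is full.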
    for (int r = 0; r < 10000; ++r) {
      int row = batch.size++;
      x.vector[row] = r;
      byte[] buffer = ("byte-" + r).getBytes(StandardCharsets.UTF_8);
      y.setRef(row, buffer, 0, buffer.length);
      if (batch.size == batch.getMaxSize()) {
        writer.addRowBatch(batch);
        batch.reset();
      }
    }
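    // Flush any remaining rows that did not fill a complete batch.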
    if (batch.size != 0) {
      writer.addRowBatch(batch);
    }
    writer.close();
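    // Run the scan tool against the file with stdout redirected, so its
    // report can be asserted on below.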
    PrintStream origOut = System.out;
    ByteArrayOutputStream myOut = new ByteArrayOutputStream();
    System.setOut(new PrintStream(myOut, false, StandardCharsets.UTF_8));
    ScanData.main(conf, new String[]{"--schema", testFilePath.toString()});
    System.out.flush();
    System.setOut(origOut);
    String output = myOut.toString(StandardCharsets.UTF_8);
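    // The report should contain the schema as JSON and a summary line with the row counts.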
    assertTrue(output.contains("{\"category\": \"struct\", \"id\": 0, \"max\": 2, \"fields\": [\n" +
        "{ \"x\": {\"category\": \"int\", \"id\": 1, \"max\": 1}},\n" +
        "{ \"y\": {\"category\": \"string\", \"id\": 2, \"max\": 2}}]}"));
    assertTrue(output.contains("File: TestScanData.testScan.orc, bad batches: 0, rows: 10000/10000"));
  }
}