Miscellaneous improvements to code (#122)
* making variables final
* simplifying logic
* adding resources to try-with-resources blocks (it may help to ignore whitespace while reviewing, since this change re-indents code)
* removing unthrown exceptions from method signatures
diff --git a/README.md b/README.md
index f2b23a0..b7f1110 100644
--- a/README.md
+++ b/README.md
@@ -48,13 +48,13 @@
cp conf/env.sh.example conf/env.sh
vim conf/env.sh
-3. Build the examples repo and copy the examples jar to Accumulo's `lib/` directory to get on its
+4. Build the examples repo and copy the examples jar to Accumulo's `lib/` directory to get on its
class path:
./bin/build
cp target/accumulo-examples.jar /path/to/accumulo/lib/
-4. Each Accumulo example has its own documentation and instructions for running the example which
+5. Each Accumulo example has its own documentation and instructions for running the example which
are linked to below.
When running the examples, remember the tips below:
diff --git a/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java b/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java
index bc1001b..3c45614 100644
--- a/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java
+++ b/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java
@@ -16,6 +16,7 @@
*/
package org.apache.accumulo.examples.bloom;
+import java.util.HashMap;
import java.util.Map;
import java.util.Random;
@@ -48,23 +49,21 @@
opts.parseArgs(BloomFilters.class.getName(), args);
try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
- createTableAndSetCompactionRatio(client, BloomCommon.BLOOM_TEST1_TABLE);
- createTableAndSetCompactionRatio(client, BloomCommon.BLOOM_TEST2_TABLE);
- client.tableOperations().setProperty(BloomCommon.BLOOM_TEST2_TABLE,
- BloomCommon.BLOOM_ENABLED_PROPERTY, "true");
+ Map<String,String> table1props = Map.of("table.compaction.major.ratio", "7");
+
+ Map<String,String> table2props = new HashMap<>(table1props);
+ table2props.put(BloomCommon.BLOOM_ENABLED_PROPERTY, "true");
+
+ Common.createTableWithNamespace(client, BloomCommon.BLOOM_TEST1_TABLE,
+ new NewTableConfiguration().setProperties(table1props));
+ Common.createTableWithNamespace(client, BloomCommon.BLOOM_TEST2_TABLE,
+ new NewTableConfiguration().setProperties(table2props));
+
writeAndFlushData(BloomCommon.BLOOM_TEST1_TABLE, client);
writeAndFlushData(BloomCommon.BLOOM_TEST2_TABLE, client);
}
}
- private static void createTableAndSetCompactionRatio(AccumuloClient client,
- final String tableName) throws AccumuloException, AccumuloSecurityException {
- log.info("Creating {}", tableName);
- Map<String,String> props = Map.of("table.compaction.major.ratio", "7");
- var newTableConfig = new NewTableConfiguration().setProperties(props);
- Common.createTableWithNamespace(client, tableName, newTableConfig);
- }
-
// Write a million rows 3 times flushing files to disk separately
private static void writeAndFlushData(final String tableName, final AccumuloClient client)
throws TableNotFoundException, AccumuloSecurityException, AccumuloException {
diff --git a/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java b/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java
index 1744795..814a4f5 100644
--- a/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java
+++ b/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java
@@ -34,7 +34,7 @@
long count = 0;
int expectedValueSize = 0;
- HashMap<String,Boolean> expectedRows;
+ final HashMap<String,Boolean> expectedRows;
CountingVerifyingReceiver(HashMap<String,Boolean> expectedRows, int expectedValueSize) {
this.expectedRows = expectedRows;
diff --git a/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java b/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
index 2038b64..eccc0a3 100644
--- a/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
+++ b/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
@@ -125,10 +125,10 @@
opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
if ((opts.max - opts.min) < 1L * opts.num) { // right-side multiplied by 1L to convert to long
// in a way that doesn't trigger FindBugs
- System.err.println(String.format(
+ System.err.printf(
"You must specify a min and a max that allow for at least num possible values. "
- + "For example, you requested %d rows, but a min of %d and a max of %d (exclusive), which only allows for %d rows.",
- opts.num, opts.min, opts.max, (opts.max - opts.min)));
+ + "For example, you requested %d rows, but a min of %d and a max of %d (exclusive), which only allows for %d rows.%n",
+ opts.num, opts.min, opts.max, (opts.max - opts.min));
System.exit(1);
}
Random r;
@@ -158,11 +158,7 @@
HashMap<String,Set<SecurityErrorCode>> tables = new HashMap<>();
for (Entry<TabletId,Set<SecurityErrorCode>> ke : e.getSecurityErrorCodes().entrySet()) {
String tableId = ke.getKey().getTable().toString();
- Set<SecurityErrorCode> secCodes = tables.get(tableId);
- if (secCodes == null) {
- secCodes = new HashSet<>();
- tables.put(tableId, secCodes);
- }
+ Set<SecurityErrorCode> secCodes = tables.computeIfAbsent(tableId, k -> new HashSet<>());
secCodes.addAll(ke.getValue());
}
System.err.println("ERROR : Not authorized to write to tables : " + tables);
diff --git a/src/main/java/org/apache/accumulo/examples/client/TracingExample.java b/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
index 1a13f7a..3a871a4 100644
--- a/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
@@ -18,18 +18,14 @@
package org.apache.accumulo.examples.client;
import java.time.Instant;
-import java.util.Map.Entry;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.examples.Common;
import org.apache.accumulo.examples.cli.ClientOnDefaultTable;
@@ -76,8 +72,8 @@
this.tracer = GlobalOpenTelemetry.get().getTracer(TracingExample.class.getSimpleName());
}
- private void execute(Opts opts) throws TableNotFoundException, AccumuloException,
- AccumuloSecurityException, TableExistsException {
+ private void execute(Opts opts)
+ throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
Span span = tracer.spanBuilder("trace example").startSpan();
try (Scope scope = span.makeCurrent()) {
@@ -131,11 +127,8 @@
// Trace the read operation.
Span span = tracer.spanBuilder("readEntries").startSpan();
try (Scope scope = span.makeCurrent()) {
- int numberOfEntriesRead = 0;
- for (Entry<Key,Value> entry : scanner) {
- System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
- ++numberOfEntriesRead;
- }
+ long numberOfEntriesRead = scanner.stream().peek(entry -> System.out
+ .println(entry.getKey().toString() + " -> " + entry.getValue().toString())).count();
// You can add additional metadata (key, values) to Spans
span.setAttribute("Number of Entries Read", numberOfEntriesRead);
} finally {
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java b/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
index 83b449f..939ac75 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
@@ -58,7 +58,7 @@
int recusiveFileCount = 0;
void set(Value val) {
- String sa[] = val.toString().split(",");
+ String[] sa = val.toString().split(",");
dirCount = Integer.parseInt(sa[0]);
fileCount = Integer.parseInt(sa[1]);
recursiveDirCount = Integer.parseInt(sa[2]);
@@ -246,7 +246,7 @@
}
public FileCount(AccumuloClient client, String tableName, Authorizations auths,
- ColumnVisibility cv, ScannerOpts scanOpts, BatchWriterOpts bwOpts) throws Exception {
+ ColumnVisibility cv, ScannerOpts scanOpts, BatchWriterOpts bwOpts) {
this.client = client;
this.tableName = tableName;
this.auths = auths;
@@ -260,31 +260,30 @@
entriesScanned = 0;
inserts = 0;
- Scanner scanner = client.createScanner(tableName, auths);
- scanner.setBatchSize(scanOpts.scanBatchSize);
- BatchWriter bw = client.createBatchWriter(tableName, bwOpts.getBatchWriterConfig());
+ try (Scanner scanner = client.createScanner(tableName, auths);
+ BatchWriter bw = client.createBatchWriter(tableName, bwOpts.getBatchWriterConfig())) {
+ scanner.setBatchSize(scanOpts.scanBatchSize);
- long t1 = System.currentTimeMillis();
+ long t1 = System.currentTimeMillis();
- int depth = findMaxDepth(scanner);
+ int depth = findMaxDepth(scanner);
- long t2 = System.currentTimeMillis();
+ long t2 = System.currentTimeMillis();
- for (int d = depth; d > 0; d--) {
- calculateCounts(scanner, d, bw);
- // must flush so next depth can read what prev depth wrote
- bw.flush();
+ for (int d = depth; d > 0; d--) {
+ calculateCounts(scanner, d, bw);
+ // must flush so next depth can read what prev depth wrote
+ bw.flush();
+ }
+
+ long t3 = System.currentTimeMillis();
+
+ System.out.printf("Max depth : %d%n", depth);
+ System.out.printf("Time to find max depth : %,d ms%n", (t2 - t1));
+ System.out.printf("Time to compute counts : %,d ms%n", (t3 - t2));
+ System.out.printf("Entries scanned : %,d %n", entriesScanned);
+ System.out.printf("Counts inserted : %,d %n", inserts);
}
-
- bw.close();
-
- long t3 = System.currentTimeMillis();
-
- System.out.printf("Max depth : %d%n", depth);
- System.out.printf("Time to find max depth : %,d ms%n", (t2 - t1));
- System.out.printf("Time to compute counts : %,d ms%n", (t3 - t2));
- System.out.printf("Entries scanned : %,d %n", entriesScanned);
- System.out.printf("Counts inserted : %,d %n", inserts);
}
public static class Opts extends ClientOnRequiredTable {
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java b/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
index ec4e712..c679ba2 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
@@ -168,26 +168,27 @@
Common.createTableWithNamespace(client, opts.indexTable);
Common.createTableWithNamespace(client, opts.dataTable, newTableConfig);
- BatchWriter dirBW = client.createBatchWriter(opts.dirTable, bwOpts.getBatchWriterConfig());
- BatchWriter indexBW = client.createBatchWriter(opts.indexTable,
- bwOpts.getBatchWriterConfig());
- BatchWriter dataBW = client.createBatchWriter(opts.dataTable, bwOpts.getBatchWriterConfig());
- FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
- for (String dir : opts.directories) {
- recurse(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
+ try (
+ BatchWriter dirBW = client.createBatchWriter(opts.dirTable,
+ bwOpts.getBatchWriterConfig());
+ BatchWriter indexBW = client.createBatchWriter(opts.indexTable,
+ bwOpts.getBatchWriterConfig());
+ BatchWriter dataBW = client.createBatchWriter(opts.dataTable,
+ bwOpts.getBatchWriterConfig())) {
+ FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
+ for (String dir : opts.directories) {
+ recurse(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
- // fill in parent directory info
- int slashIndex;
- while ((slashIndex = dir.lastIndexOf('/')) > 0) {
- dir = dir.substring(0, slashIndex);
- ingest(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
+ // fill in parent directory info
+ int slashIndex;
+ while ((slashIndex = dir.lastIndexOf('/')) > 0) {
+ dir = dir.substring(0, slashIndex);
+ ingest(new File(dir), opts.visibility, dirBW, indexBW, fdi, dataBW);
+ }
}
- }
- ingest(new File("/"), opts.visibility, dirBW, indexBW, fdi, dataBW);
+ ingest(new File("/"), opts.visibility, dirBW, indexBW, fdi, dataBW);
- dirBW.close();
- indexBW.close();
- dataBW.close();
+ }
}
}
}
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java b/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
index 7be97de..64ac4a5 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
@@ -129,8 +129,8 @@
*/
public static String getType(Text colf) {
if (colf.equals(DIR_COLF))
- return colf.toString() + ":";
- return Long.toString(Ingest.encoder.decode(colf.getBytes())) + ":";
+ return colf + ":";
+ return Ingest.encoder.decode(colf.getBytes()) + ":";
}
/**
@@ -142,14 +142,15 @@
public Map<String,String> getData(String path) throws TableNotFoundException {
if (path.endsWith("/"))
path = path.substring(0, path.length() - 1);
- Scanner scanner = client.createScanner(tableName, auths);
- scanner.setRange(new Range(getRow(path)));
Map<String,String> data = new TreeMap<>();
- for (Entry<Key,Value> e : scanner) {
- String type = getType(e.getKey().getColumnFamily());
- data.put("fullname", e.getKey().getRow().toString().substring(3));
- data.put(type + e.getKey().getColumnQualifier().toString() + ":"
- + e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
+ try (Scanner scanner = client.createScanner(tableName, auths)) {
+ scanner.setRange(new Range(getRow(path)));
+ for (Entry<Key,Value> e : scanner) {
+ String type = getType(e.getKey().getColumnFamily());
+ data.put("fullname", e.getKey().getRow().toString().substring(3));
+ data.put(type + e.getKey().getColumnQualifier().toString() + ":"
+ + e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
+ }
}
return data;
}
@@ -164,18 +165,19 @@
if (!path.endsWith("/"))
path = path + "/";
Map<String,Map<String,String>> fim = new TreeMap<>();
- Scanner scanner = client.createScanner(tableName, auths);
- scanner.setRange(Range.prefix(getRow(path)));
- for (Entry<Key,Value> e : scanner) {
- String name = e.getKey().getRow().toString();
- name = name.substring(name.lastIndexOf("/") + 1);
- String type = getType(e.getKey().getColumnFamily());
- if (!fim.containsKey(name)) {
- fim.put(name, new TreeMap<>());
- fim.get(name).put("fullname", e.getKey().getRow().toString().substring(3));
+ try (Scanner scanner = client.createScanner(tableName, auths)) {
+ scanner.setRange(Range.prefix(getRow(path)));
+ for (Entry<Key,Value> e : scanner) {
+ String name = e.getKey().getRow().toString();
+ name = name.substring(name.lastIndexOf("/") + 1);
+ String type = getType(e.getKey().getColumnFamily());
+ if (!fim.containsKey(name)) {
+ fim.put(name, new TreeMap<>());
+ fim.get(name).put("fullname", e.getKey().getRow().toString().substring(3));
+ }
+ fim.get(name).put(type + e.getKey().getColumnQualifier().toString() + ":"
+ + e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
}
- fim.get(name).put(type + e.getKey().getColumnQualifier().toString() + ":"
- + e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
}
return fim;
}
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java b/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
index 6cfe513..3d8d1b3 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
@@ -53,7 +53,7 @@
JTree tree;
DefaultTreeModel treeModel;
- QueryUtil q;
+ final QueryUtil q;
FileDataQuery fdq;
String topPath;
Map<String,DefaultMutableTreeNode> nodeNameMap;
@@ -120,7 +120,6 @@
}
public void populateChildren(DefaultMutableTreeNode node) throws TableNotFoundException {
- @SuppressWarnings("unchecked")
Enumeration<TreeNode> children = node.children();
while (children.hasMoreElements()) {
populate((DefaultMutableTreeNode) children.nextElement());
@@ -176,7 +175,6 @@
@Override
public void treeCollapsed(TreeExpansionEvent event) {
DefaultMutableTreeNode node = (DefaultMutableTreeNode) event.getPath().getLastPathComponent();
- @SuppressWarnings("unchecked")
Enumeration<TreeNode> children = node.children();
while (children.hasMoreElements()) {
DefaultMutableTreeNode child = (DefaultMutableTreeNode) children.nextElement();
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputStream.java b/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputStream.java
index 5c40101..a455b08 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputStream.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputStream.java
@@ -143,8 +143,7 @@
}
if (gotEndMarker) {
- log.debug("got another chunk after end marker: " + currentKey.toString() + " "
- + thisKey.toString());
+ log.debug("got another chunk after end marker: " + currentKey.toString() + " " + thisKey);
clear();
throw new IOException("found extra chunk after end marker");
}
@@ -152,8 +151,8 @@
// got new chunk of the same file, check that it's the next chunk
int thisChunk = FileDataIngest.bytesToInt(thisKey.getColumnQualifier().getBytes(), 4);
if (thisChunk != currentChunk + 1) {
- log.debug("new chunk same file, unexpected chunkID: " + currentKey.toString() + " "
- + thisKey.toString());
+ log.debug(
+ "new chunk same file, unexpected chunkID: " + currentKey.toString() + " " + thisKey);
clear();
throw new IOException("missing chunks between " + currentChunk + " and " + thisChunk);
}
@@ -226,7 +225,7 @@
avail = count - pos;
}
- int cnt = (avail < len - total) ? avail : len - total;
+ int cnt = Math.min(avail, len - total);
log.debug("copying from local buffer: local pos " + pos + " into pos " + off + " len " + cnt);
System.arraycopy(buf, pos, b, off, cnt);
pos += cnt;
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java b/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
index 0bca7ec..c4ef7e4 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
@@ -58,9 +58,9 @@
public static final String TABLE_EXISTS_MSG = "Table already exists. User may wish to delete existing "
+ "table and re-run example. Table name: ";
- int chunkSize;
- byte[] chunkSizeBytes;
- byte[] buf;
+ final int chunkSize;
+ final byte[] chunkSizeBytes;
+ final byte[] buf;
MessageDigest md5digest;
ColumnVisibility cv;
@@ -85,10 +85,8 @@
// read through file once, calculating hashes
md5digest.reset();
- InputStream fis = null;
int numRead = 0;
- try {
- fis = new FileInputStream(filename);
+ try (InputStream fis = new FileInputStream(filename)) {
numRead = fis.read(buf);
while (numRead >= 0) {
if (numRead > 0) {
@@ -96,10 +94,6 @@
}
numRead = fis.read(buf);
}
- } finally {
- if (fis != null) {
- fis.close();
- }
}
String row = hexString(md5digest.digest());
@@ -115,8 +109,7 @@
// read through file again, writing chunks to accumulo
int chunkCount = 0;
- try {
- fis = new FileInputStream(filename);
+ try (InputStream fis = new FileInputStream(filename)) {
numRead = fis.read(buf);
while (numRead >= 0) {
while (numRead < buf.length) {
@@ -137,10 +130,6 @@
chunkCount++;
numRead = fis.read(buf);
}
- } finally {
- if (fis != null) {
- fis.close();
- }
}
m = new Mutation(row);
Text chunkCQ = new Text(chunkSizeBytes);
@@ -153,9 +142,8 @@
public static int bytesToInt(byte[] b, int offset) {
if (b.length <= offset + 3)
throw new NumberFormatException("couldn't pull integer from bytes at offset " + offset);
- int i = (((b[offset] & 255) << 24) + ((b[offset + 1] & 255) << 16)
- + ((b[offset + 2] & 255) << 8) + ((b[offset + 3] & 255) << 0));
- return i;
+ return (((b[offset] & 255) << 24) + ((b[offset + 1] & 255) << 16) + ((b[offset + 2] & 255) << 8)
+ + ((b[offset + 3] & 255)));
}
public static byte[] intToBytes(int l) {
@@ -163,12 +151,12 @@
b[0] = (byte) (l >>> 24);
b[1] = (byte) (l >>> 16);
b[2] = (byte) (l >>> 8);
- b[3] = (byte) (l >>> 0);
+ b[3] = (byte) (l);
return b;
}
private static String getExt(String filename) {
- if (filename.indexOf(".") == -1)
+ if (!filename.contains("."))
return null;
return filename.substring(filename.lastIndexOf(".") + 1);
}
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java b/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java
index 04a2a8d..3fbc333 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java
@@ -22,8 +22,6 @@
import java.util.Map.Entry;
import org.apache.accumulo.core.client.AccumuloClient;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
@@ -39,12 +37,12 @@
* {@link org.apache.accumulo.examples.dirlist.Viewer}. See README.dirlist for instructions.
*/
public class FileDataQuery {
- List<Entry<Key,Value>> lastRefs;
+ final List<Entry<Key,Value>> lastRefs;
private final ChunkInputStream cis;
Scanner scanner;
public FileDataQuery(AccumuloClient client, String tableName, Authorizations auths)
- throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+ throws TableNotFoundException {
lastRefs = new ArrayList<>();
cis = new ChunkInputStream();
scanner = client.createScanner(tableName, auths);
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/VisibilityCombiner.java b/src/main/java/org/apache/accumulo/examples/filedata/VisibilityCombiner.java
index a3534ef..3e6f545 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/VisibilityCombiner.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/VisibilityCombiner.java
@@ -63,9 +63,6 @@
}
private void insert(ByteSequence cv) {
- for (int i = 0; i < cv.length(); i++) {
-
- }
String cvs = cv.toString();
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/TeraSortIngest.java b/src/main/java/org/apache/accumulo/examples/mapreduce/TeraSortIngest.java
index 71b8d5c..c5b7605 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/TeraSortIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/TeraSortIngest.java
@@ -87,12 +87,12 @@
}
@Override
- public long getLength() throws IOException {
+ public long getLength() {
return 0;
}
@Override
- public String[] getLocations() throws IOException {
+ public String[] getLocations() {
return new String[] {};
}
@@ -113,9 +113,9 @@
* A record reader that will generate a range of numbers.
*/
static class RangeRecordReader extends RecordReader<LongWritable,NullWritable> {
- long startRow;
+ final long startRow;
long finishedRows;
- long totalRows;
+ final long totalRows;
public RangeRecordReader(RangeInputSplit split) {
startRow = split.firstRow;
@@ -127,26 +127,25 @@
public void close() throws IOException {}
@Override
- public float getProgress() throws IOException {
+ public float getProgress() {
return finishedRows / (float) totalRows;
}
@Override
- public LongWritable getCurrentKey() throws IOException, InterruptedException {
+ public LongWritable getCurrentKey() {
return new LongWritable(startRow + finishedRows);
}
@Override
- public NullWritable getCurrentValue() throws IOException, InterruptedException {
+ public NullWritable getCurrentValue() {
return NullWritable.get();
}
@Override
- public void initialize(InputSplit split, TaskAttemptContext context)
- throws IOException, InterruptedException {}
+ public void initialize(InputSplit split, TaskAttemptContext context) {}
@Override
- public boolean nextKeyValue() throws IOException, InterruptedException {
+ public boolean nextKeyValue() {
if (finishedRows < totalRows) {
++finishedRows;
return true;
@@ -157,7 +156,7 @@
@Override
public RecordReader<LongWritable,NullWritable> createRecordReader(InputSplit split,
- TaskAttemptContext context) throws IOException {
+ TaskAttemptContext context) {
return new RangeRecordReader((RangeInputSplit) split);
}
@@ -188,7 +187,7 @@
static class RandomGenerator {
private long seed = 0;
- private static final long mask32 = (1l << 32) - 1;
+ private static final long mask32 = (1L << 32) - 1;
/**
* The number of iterations separating the precomputed seeds.
*/
@@ -240,7 +239,6 @@
private final Text key = new Text();
private final Text value = new Text();
private RandomGenerator rand;
- private byte[] keyBytes; // = new byte[12];
private final String spaces = " ";
private final byte[][] filler = new byte[26][];
{
@@ -261,7 +259,7 @@
int range = random.nextInt(maxkeylength - minkeylength + 1);
int keylen = range + minkeylength;
int keyceil = keylen + (4 - (keylen % 4));
- keyBytes = new byte[keyceil];
+ byte[] keyBytes = new byte[keyceil];
long temp = 0;
for (int i = 0; i < keyceil / 4; i++) {
@@ -338,7 +336,7 @@
context.setStatus("About to add to accumulo");
context.write(tableName, m);
- context.setStatus("Added to accumulo " + key.toString());
+ context.setStatus("Added to accumulo " + key);
}
@Override
diff --git a/src/main/java/org/apache/accumulo/examples/reservations/ARS.java b/src/main/java/org/apache/accumulo/examples/reservations/ARS.java
index 1836780..034b694 100644
--- a/src/main/java/org/apache/accumulo/examples/reservations/ARS.java
+++ b/src/main/java/org/apache/accumulo/examples/reservations/ARS.java
@@ -54,8 +54,8 @@
private static final Logger log = LoggerFactory.getLogger(ARS.class);
- private AccumuloClient client;
- private String rTable;
+ private final AccumuloClient client;
+ private final String rTable;
public enum ReservationResult {
RESERVED, WAIT_LISTED
@@ -93,7 +93,8 @@
try (
ConditionalWriter cwriter = client.createConditionalWriter(rTable,
new ConditionalWriterConfig());
- Scanner scanner = new IsolatedScanner(client.createScanner(rTable, Authorizations.EMPTY))) {
+ Scanner scanner = client.createScanner(rTable, Authorizations.EMPTY);
+ Scanner isolatedScanner = new IsolatedScanner(scanner)) {
while (true) {
Status status = cwriter.write(update).getStatus();
switch (status) {
@@ -115,12 +116,12 @@
// sub-queues within the row that approximately maintain arrival order and use exponential
// back off to fairly merge the sub-queues into the main queue.
- scanner.setRange(new Range(row));
+ isolatedScanner.setRange(new Range(row));
int seq = -1;
int maxReservation = -1;
- for (Entry<Key,Value> entry : scanner) {
+ for (Entry<Key,Value> entry : isolatedScanner) {
String cf = entry.getKey().getColumnFamilyData().toString();
String cq = entry.getKey().getColumnQualifierData().toString();
String val = entry.getValue().toString();
@@ -176,14 +177,15 @@
try (
ConditionalWriter cwriter = client.createConditionalWriter(rTable,
new ConditionalWriterConfig());
- Scanner scanner = new IsolatedScanner(client.createScanner(rTable, Authorizations.EMPTY))) {
+ Scanner scanner = client.createScanner(rTable, Authorizations.EMPTY);
+ Scanner isolatedScanner = new IsolatedScanner(scanner)) {
while (true) {
- scanner.setRange(new Range(row));
+ isolatedScanner.setRange(new Range(row));
int seq = -1;
String reservation = null;
- for (Entry<Key,Value> entry : scanner) {
+ for (Entry<Key,Value> entry : isolatedScanner) {
String cf = entry.getKey().getColumnFamilyData().toString();
String cq = entry.getKey().getColumnQualifierData().toString();
String val = entry.getValue().toString();
@@ -230,14 +232,14 @@
String row = what + ":" + when;
// its important to use an isolated scanner so that only whole mutations are seen
- try (
- Scanner scanner = new IsolatedScanner(client.createScanner(rTable, Authorizations.EMPTY))) {
- scanner.setRange(new Range(row));
- scanner.fetchColumnFamily("res");
+ try (Scanner scanner = client.createScanner(rTable, Authorizations.EMPTY);
+ Scanner isolatedScanner = new IsolatedScanner(scanner)) {
+ isolatedScanner.setRange(new Range(row));
+ isolatedScanner.fetchColumnFamily("res");
List<String> reservations = new ArrayList<>();
- for (Entry<Key,Value> entry : scanner) {
+ for (Entry<Key,Value> entry : isolatedScanner) {
String val = entry.getValue().toString();
reservations.add(val);
}
@@ -266,15 +268,12 @@
ArrayList<Thread> threads = new ArrayList<>();
for (int i = 3; i < tokens.length; i++) {
final int whoIndex = i;
- Runnable reservationTask = new Runnable() {
- @Override
- public void run() {
- try {
- out.println(" " + String.format("%20s", tokens[whoIndex]) + " : "
- + fars.reserve(tokens[1], tokens[2], tokens[whoIndex]));
- } catch (Exception e) {
- log.warn("Could not write to the ConsoleReader.", e);
- }
+ Runnable reservationTask = () -> {
+ try {
+ out.println(" " + String.format("%20s", tokens[whoIndex]) + " : "
+ + fars.reserve(tokens[1], tokens[2], tokens[whoIndex]));
+ } catch (Exception e) {
+ log.warn("Could not write to the ConsoleReader.", e);
}
};
diff --git a/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java b/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java
index 3e1b0b9..343bcdd 100644
--- a/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java
+++ b/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java
@@ -65,9 +65,10 @@
opts.parseArgs(ContinuousQuery.class.getName(), args);
try (AccumuloClient client = Accumulo.newClient().from(opts.getClientPropsPath()).build()) {
-
- ArrayList<Text[]> randTerms = findRandomTerms(
- client.createScanner(opts.doc2Term, Authorizations.EMPTY), opts.numTerms);
+ ArrayList<Text[]> randTerms;
+ try (Scanner scanner = client.createScanner(opts.doc2Term, Authorizations.EMPTY)) {
+ randTerms = findRandomTerms(scanner, opts.numTerms);
+ }
Random rand = new Random();
diff --git a/src/main/java/org/apache/accumulo/examples/shard/Index.java b/src/main/java/org/apache/accumulo/examples/shard/Index.java
index 63a12e0..772027c 100644
--- a/src/main/java/org/apache/accumulo/examples/shard/Index.java
+++ b/src/main/java/org/apache/accumulo/examples/shard/Index.java
@@ -77,17 +77,18 @@
}
}
} else {
- FileReader fr = new FileReader(src);
StringBuilder sb = new StringBuilder();
- char[] data = new char[4096];
- int len;
- while ((len = fr.read(data)) != -1) {
- sb.append(data, 0, len);
- }
+ try (FileReader fr = new FileReader(src)) {
- fr.close();
+ char[] data = new char[4096];
+ int len;
+ while ((len = fr.read(data)) != -1) {
+ sb.append(data, 0, len);
+ }
+
+ }
index(numPartitions, src.getAbsolutePath(), sb.toString(), splitRegex, bw);
}
diff --git a/src/test/java/org/apache/accumulo/examples/ExamplesIT.java b/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
index c5853b2..7de11a0 100644
--- a/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
+++ b/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
@@ -43,6 +43,7 @@
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.conf.Property;
@@ -81,8 +82,6 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import com.google.common.collect.Iterators;
-
public class ExamplesIT extends AccumuloClusterHarness {
private static final BatchWriterConfig bwc = new BatchWriterConfig();
private static final String auths = "A,B";
@@ -123,6 +122,9 @@
@AfterEach
public void teardownTest() throws Exception {
+ if (bw != null) {
+ bw.close();
+ }
if (null != origAuths) {
c.securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths);
}
@@ -163,7 +165,9 @@
bw.addMutation(m);
bw.close();
sleepUninterruptibly(1, TimeUnit.SECONDS);
- assertEquals(0, Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator()));
+ try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
+ assertTrue(scanner.stream().findAny().isEmpty());
+ }
}
@Test
@@ -186,22 +190,26 @@
bw.addMutation(m);
bw.flush();
- Iterator<Entry<Key,Value>> iter = c.createScanner(table, Authorizations.EMPTY).iterator();
- assertTrue(iter.hasNext(), "Iterator had no results");
- Entry<Key,Value> e = iter.next();
- assertEquals("1,3,4,2", e.getValue().toString(), "Results ");
- assertFalse(iter.hasNext(), "Iterator had additional results");
+ try (Scanner scanner = c.createScanner(table, Authorizations.EMPTY)) {
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+ assertTrue(iter.hasNext(), "Iterator had no results");
+ Entry<Key,Value> e = iter.next();
+ assertEquals("1,3,4,2", e.getValue().toString(), "Results ");
+ assertFalse(iter.hasNext(), "Iterator had additional results");
- m = new Mutation("foo");
- m.put("a", "b", "0,20,20,2");
- bw.addMutation(m);
- bw.close();
+ m = new Mutation("foo");
+ m.put("a", "b", "0,20,20,2");
+ bw.addMutation(m);
+ bw.close();
+ }
- iter = c.createScanner(table, Authorizations.EMPTY).iterator();
- assertTrue(iter.hasNext(), "Iterator had no results");
- e = iter.next();
- assertEquals("0,20,24,4", e.getValue().toString(), "Results ");
- assertFalse(iter.hasNext(), "Iterator had additional results");
+ try (Scanner scanner = c.createScanner(table, Authorizations.EMPTY)) {
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+ assertTrue(iter.hasNext(), "Iterator had no results");
+ Entry<Key,Value> e = iter.next();
+ assertEquals("0,20,24,4", e.getValue().toString(), "Results ");
+ assertFalse(iter.hasNext(), "Iterator had additional results");
+ }
}
@Test
@@ -215,18 +223,12 @@
bw = c.createBatchWriter(shard, bwc);
Index.index(30, src, "\\W+", bw);
bw.close();
- BatchScanner bs = c.createBatchScanner(shard, Authorizations.EMPTY, 4);
- List<String> found = Query.query(bs, Arrays.asList("foo", "bar"), null);
- bs.close();
- // should find ourselves
- boolean thisFile = false;
- for (String file : found) {
- if (file.endsWith("/ExamplesIT.java")) {
- thisFile = true;
- break;
- }
+ List<String> found;
+ try (BatchScanner bs = c.createBatchScanner(shard, Authorizations.EMPTY, 4)) {
+ found = Query.query(bs, Arrays.asList("foo", "bar"), null);
}
- assertTrue(thisFile);
+ // should find ourselves
+ assertTrue(found.stream().anyMatch(file -> file.endsWith("/ExamplesIT.java")));
String[] args = new String[] {"-c", getClientPropsFile(), "--shardTable", shard, "--doc2Term",
index};
diff --git a/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java b/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java
index 72214ff..d86b139 100644
--- a/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java
+++ b/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java
@@ -57,21 +57,23 @@
tableName = getUniqueNames(1)[0];
client = Accumulo.newClient().from(getClientProperties()).build();
client.tableOperations().create(tableName);
- BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
- ColumnVisibility cv = new ColumnVisibility();
- // / has 1 dir
- // /local has 2 dirs 1 file
- // /local/user1 has 2 files
- bw.addMutation(Ingest.buildMutation(cv, "/local", true, false, true, 272, 12345, null));
- bw.addMutation(Ingest.buildMutation(cv, "/local/user1", true, false, true, 272, 12345, null));
- bw.addMutation(Ingest.buildMutation(cv, "/local/user2", true, false, true, 272, 12345, null));
- bw.addMutation(Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 12345, null));
- bw.addMutation(Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 23456, null));
- bw.addMutation(
- Ingest.buildMutation(cv, "/local/user1/file1", false, false, false, 2024, 12345, null));
- bw.addMutation(
- Ingest.buildMutation(cv, "/local/user1/file2", false, false, false, 1028, 23456, null));
- bw.close();
+ try (BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig())) {
+ ColumnVisibility cv = new ColumnVisibility();
+ // / has 1 dir
+ // /local has 2 dirs 1 file
+ // /local/user1 has 2 files
+ bw.addMutation(Ingest.buildMutation(cv, "/local", true, false, true, 272, 12345, null));
+ bw.addMutation(Ingest.buildMutation(cv, "/local/user1", true, false, true, 272, 12345, null));
+ bw.addMutation(Ingest.buildMutation(cv, "/local/user2", true, false, true, 272, 12345, null));
+ bw.addMutation(
+ Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 12345, null));
+ bw.addMutation(
+ Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 23456, null));
+ bw.addMutation(
+ Ingest.buildMutation(cv, "/local/user1/file1", false, false, false, 2024, 12345, null));
+ bw.addMutation(
+ Ingest.buildMutation(cv, "/local/user1/file2", false, false, false, 1028, 23456, null));
+ }
}
@AfterEach
@@ -81,15 +83,11 @@
@Test
public void test() throws Exception {
- Scanner scanner = client.createScanner(tableName, new Authorizations());
- scanner.fetchColumn("dir", "counts");
- assertFalse(scanner.iterator().hasNext());
ScannerOpts scanOpts = new ScannerOpts();
BatchWriterOpts bwOpts = new BatchWriterOpts();
FileCount fc = new FileCount(client, tableName, Authorizations.EMPTY, new ColumnVisibility(),
scanOpts, bwOpts);
- fc.run();
ArrayList<Pair<String,String>> expected = new ArrayList<>();
expected.add(new Pair<>(QueryUtil.getRow("").toString(), "1,0,3,3"));
@@ -97,12 +95,19 @@
expected.add(new Pair<>(QueryUtil.getRow("/local/user1").toString(), "0,2,0,2"));
expected.add(new Pair<>(QueryUtil.getRow("/local/user2").toString(), "0,0,0,0"));
- int i = 0;
- for (Entry<Key,Value> e : scanner) {
- assertEquals(e.getKey().getRow().toString(), expected.get(i).getFirst());
- assertEquals(e.getValue().toString(), expected.get(i).getSecond());
- i++;
+ int actualCount = 0;
+ try (Scanner scanner = client.createScanner(tableName, new Authorizations())) {
+ scanner.fetchColumn("dir", "counts");
+ assertFalse(scanner.iterator().hasNext());
+
+ fc.run();
+
+ for (Entry<Key,Value> e : scanner) {
+ assertEquals(e.getKey().getRow().toString(), expected.get(actualCount).getFirst());
+ assertEquals(e.getValue().toString(), expected.get(actualCount).getSecond());
+ actualCount++;
+ }
}
- assertEquals(i, expected.size());
+ assertEquals(expected.size(), actualCount);
}
}
diff --git a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java
index 948bb54..d709a90 100644
--- a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java
+++ b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java
@@ -64,8 +64,6 @@
private AccumuloClient client;
private String tableName;
private List<Entry<Key,Value>> data;
- private List<Entry<Key,Value>> baddata;
- private List<Entry<Key,Value>> multidata;
@BeforeEach
public void setupInstance() throws Exception {
@@ -99,7 +97,7 @@
addData(data, "d", "~chunk", 100, 0, "A&B", "");
addData(data, "e", "~chunk", 100, 0, "A&B", "asdfjkl;");
addData(data, "e", "~chunk", 100, 1, "A&B", "");
- baddata = new ArrayList<>();
+ List<Entry<Key,Value>> baddata = new ArrayList<>();
addData(baddata, "a", "~chunk", 100, 0, "A", "asdfjkl;");
addData(baddata, "b", "~chunk", 100, 0, "B", "asdfjkl;");
addData(baddata, "b", "~chunk", 100, 2, "C", "");
@@ -113,7 +111,7 @@
addData(baddata, "e", "~chunk", 100, 2, "I", "asdfjkl;");
addData(baddata, "f", "~chunk", 100, 2, "K", "asdfjkl;");
addData(baddata, "g", "~chunk", 100, 0, "L", "");
- multidata = new ArrayList<>();
+ List<Entry<Key,Value>> multidata = new ArrayList<>();
addData(multidata, "a", "~chunk", 100, 0, "A&B", "asdfjkl;");
addData(multidata, "a", "~chunk", 100, 1, "A&B", "");
addData(multidata, "a", "~chunk", 200, 0, "B&C", "asdfjkl;");
@@ -141,54 +139,54 @@
public void testWithAccumulo() throws AccumuloException, AccumuloSecurityException,
TableExistsException, TableNotFoundException, IOException {
client.tableOperations().create(tableName);
- BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
-
- for (Entry<Key,Value> e : data) {
- Key k = e.getKey();
- Mutation m = new Mutation(k.getRow());
- m.put(k.getColumnFamily(), k.getColumnQualifier(),
- new ColumnVisibility(k.getColumnVisibility()), e.getValue());
- bw.addMutation(m);
+ try (BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig())) {
+ for (Entry<Key,Value> e : data) {
+ Key k = e.getKey();
+ Mutation m = new Mutation(k.getRow());
+ m.put(k.getColumnFamily(), k.getColumnQualifier(),
+ new ColumnVisibility(k.getColumnVisibility()), e.getValue());
+ bw.addMutation(m);
+ }
}
- bw.close();
- Scanner scan = client.createScanner(tableName, AUTHS);
+ try (Scanner scan = client.createScanner(tableName, AUTHS)) {
- ChunkInputStream cis = new ChunkInputStream();
- byte[] b = new byte[20];
- int read;
- PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(scan.iterator());
+ byte[] b = new byte[20];
+ int read;
+ PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(scan.iterator());
- cis.setSource(pi);
- assertEquals(read = cis.read(b), 8);
- assertEquals(new String(b, 0, read), "asdfjkl;");
- assertEquals(read = cis.read(b), -1);
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(8, read = cis.read(b));
+ assertEquals("asdfjkl;", new String(b, 0, read));
+ assertEquals(-1, cis.read(b));
+ }
- cis.setSource(pi);
- assertEquals(read = cis.read(b), 10);
- assertEquals(new String(b, 0, read), "qwertyuiop");
- assertEquals(read = cis.read(b), -1);
- assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
- cis.close();
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(10, read = cis.read(b));
+ assertEquals("qwertyuiop", new String(b, 0, read));
+ assertEquals(-1, cis.read(b));
+ assertEquals("[A&B, B&C, D]", cis.getVisibilities().toString());
+ }
- cis.setSource(pi);
- assertEquals(read = cis.read(b), 16);
- assertEquals(new String(b, 0, read), "asdfjkl;asdfjkl;");
- assertEquals(read = cis.read(b), -1);
- assertEquals(cis.getVisibilities().toString(), "[A&B]");
- cis.close();
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(16, read = cis.read(b));
+ assertEquals("asdfjkl;asdfjkl;", new String(b, 0, read));
+ assertEquals(-1, cis.read(b));
+ assertEquals("[A&B]", cis.getVisibilities().toString());
+ }
- cis.setSource(pi);
- assertEquals(read = cis.read(b), -1);
- cis.close();
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(-1, cis.read(b));
+ }
- cis.setSource(pi);
- assertEquals(read = cis.read(b), 8);
- assertEquals(new String(b, 0, read), "asdfjkl;");
- assertEquals(read = cis.read(b), -1);
- cis.close();
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(8, read = cis.read(b));
+ assertEquals("asdfjkl;", new String(b, 0, read));
+ assertEquals(-1, cis.read(b));
+ }
- assertFalse(pi.hasNext());
+ assertFalse(pi.hasNext());
+ }
}
}
diff --git a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamTest.java b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamTest.java
index 70819e9..afb5878 100644
--- a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamTest.java
+++ b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamTest.java
@@ -18,6 +18,7 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
import java.io.IOException;
@@ -103,121 +104,109 @@
@Test
public void testExceptionOnMultipleSetSourceWithoutClose() throws IOException {
- ChunkInputStream cis = new ChunkInputStream();
PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(data.iterator());
- cis.setSource(pi);
- try {
- cis.setSource(pi);
- fail();
- } catch (IOException e) {
- /* expected */
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertThrows(IOException.class, () -> cis.setSource(pi));
}
- cis.close();
}
@Test
public void testExceptionOnGetVisBeforeClose() throws IOException {
- ChunkInputStream cis = new ChunkInputStream();
PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(data.iterator());
-
- cis.setSource(pi);
- try {
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertThrows(RuntimeException.class, cis::getVisibilities);
+ cis.close();
cis.getVisibilities();
- fail();
- } catch (RuntimeException e) {
- /* expected */
}
- cis.close();
- cis.getVisibilities();
}
@Test
public void testReadIntoBufferSmallerThanChunks() throws IOException {
- ChunkInputStream cis = new ChunkInputStream();
byte[] b = new byte[5];
+ int read;
PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(data.iterator());
- cis.setSource(pi);
- int read;
- assertEquals(read = cis.read(b), 5);
- assertEquals(new String(b, 0, read), "asdfj");
- assertEquals(read = cis.read(b), 3);
- assertEquals(new String(b, 0, read), "kl;");
- assertEquals(read = cis.read(b), -1);
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(read = cis.read(b), 5);
+ assertEquals(new String(b, 0, read), "asdfj");
+ assertEquals(read = cis.read(b), 3);
+ assertEquals(new String(b, 0, read), "kl;");
+ assertEquals(read = cis.read(b), -1);
+ }
- cis.setSource(pi);
- assertEquals(read = cis.read(b), 5);
- assertEquals(new String(b, 0, read), "qwert");
- assertEquals(read = cis.read(b), 5);
- assertEquals(new String(b, 0, read), "yuiop");
- assertEquals(read = cis.read(b), -1);
- assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
- cis.close();
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(read = cis.read(b), 5);
+ assertEquals(new String(b, 0, read), "qwert");
+ assertEquals(read = cis.read(b), 5);
+ assertEquals(new String(b, 0, read), "yuiop");
+ assertEquals(read = cis.read(b), -1);
+ assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
+ }
- cis.setSource(pi);
- assertEquals(read = cis.read(b), 5);
- assertEquals(new String(b, 0, read), "asdfj");
- assertEquals(read = cis.read(b), 5);
- assertEquals(new String(b, 0, read), "kl;as");
- assertEquals(read = cis.read(b), 5);
- assertEquals(new String(b, 0, read), "dfjkl");
- assertEquals(read = cis.read(b), 1);
- assertEquals(new String(b, 0, read), ";");
- assertEquals(read = cis.read(b), -1);
- assertEquals(cis.getVisibilities().toString(), "[A&B]");
- cis.close();
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(read = cis.read(b), 5);
+ assertEquals(new String(b, 0, read), "asdfj");
+ assertEquals(read = cis.read(b), 5);
+ assertEquals(new String(b, 0, read), "kl;as");
+ assertEquals(read = cis.read(b), 5);
+ assertEquals(new String(b, 0, read), "dfjkl");
+ assertEquals(read = cis.read(b), 1);
+ assertEquals(new String(b, 0, read), ";");
+ assertEquals(read = cis.read(b), -1);
+ assertEquals(cis.getVisibilities().toString(), "[A&B]");
+ }
- cis.setSource(pi);
- assertEquals(read = cis.read(b), -1);
- cis.close();
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(read = cis.read(b), -1);
+ }
- cis.setSource(pi);
- assertEquals(read = cis.read(b), 5);
- assertEquals(new String(b, 0, read), "asdfj");
- assertEquals(read = cis.read(b), 3);
- assertEquals(new String(b, 0, read), "kl;");
- assertEquals(read = cis.read(b), -1);
- cis.close();
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(read = cis.read(b), 5);
+ assertEquals(new String(b, 0, read), "asdfj");
+ assertEquals(read = cis.read(b), 3);
+ assertEquals(new String(b, 0, read), "kl;");
+ assertEquals(read = cis.read(b), -1);
+ }
assertFalse(pi.hasNext());
}
@Test
public void testReadIntoBufferLargerThanChunks() throws IOException {
- ChunkInputStream cis = new ChunkInputStream();
byte[] b = new byte[20];
int read;
PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(data.iterator());
- cis.setSource(pi);
- assertEquals(read = cis.read(b), 8);
- assertEquals(new String(b, 0, read), "asdfjkl;");
- assertEquals(read = cis.read(b), -1);
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(read = cis.read(b), 8);
+ assertEquals(new String(b, 0, read), "asdfjkl;");
+ assertEquals(read = cis.read(b), -1);
+ }
- cis.setSource(pi);
- assertEquals(read = cis.read(b), 10);
- assertEquals(new String(b, 0, read), "qwertyuiop");
- assertEquals(read = cis.read(b), -1);
- assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
- cis.close();
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(read = cis.read(b), 10);
+ assertEquals(new String(b, 0, read), "qwertyuiop");
+ assertEquals(read = cis.read(b), -1);
+ assertEquals(cis.getVisibilities().toString(), "[A&B, B&C, D]");
+ }
- cis.setSource(pi);
- assertEquals(read = cis.read(b), 16);
- assertEquals(new String(b, 0, read), "asdfjkl;asdfjkl;");
- assertEquals(read = cis.read(b), -1);
- assertEquals(cis.getVisibilities().toString(), "[A&B]");
- cis.close();
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(read = cis.read(b), 16);
+ assertEquals(new String(b, 0, read), "asdfjkl;asdfjkl;");
+ assertEquals(read = cis.read(b), -1);
+ assertEquals(cis.getVisibilities().toString(), "[A&B]");
+ }
- cis.setSource(pi);
- assertEquals(read = cis.read(b), -1);
- cis.close();
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(read = cis.read(b), -1);
+ }
- cis.setSource(pi);
- assertEquals(read = cis.read(b), 8);
- assertEquals(new String(b, 0, read), "asdfjkl;");
- assertEquals(read = cis.read(b), -1);
- cis.close();
+ try (ChunkInputStream cis = new ChunkInputStream(pi)) {
+ assertEquals(read = cis.read(b), 8);
+ assertEquals(new String(b, 0, read), "asdfjkl;");
+ assertEquals(read = cis.read(b), -1);
+ }
assertFalse(pi.hasNext());
}
@@ -233,13 +222,8 @@
}
private static void assumeExceptionOnClose(ChunkInputStream cis) {
- try {
- cis.close();
- fail();
- } catch (IOException e) {
- log.debug("EXCEPTION {}", e.getMessage());
- // expected, ignore
- }
+ var e = assertThrows(IOException.class, cis::close);
+ log.debug("EXCEPTION {}", e.getMessage());
}
@Test
@@ -247,7 +231,7 @@
ChunkInputStream cis = new ChunkInputStream();
byte[] b = new byte[20];
int read;
- PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(baddata.iterator());
+ final PeekingIterator<Entry<Key,Value>> pi = Iterators.peekingIterator(baddata.iterator());
cis.setSource(pi);
assumeExceptionOnRead(cis, b);
@@ -277,12 +261,8 @@
cis.close();
assertEquals(cis.getVisibilities().toString(), "[I, J]");
- try {
- cis.setSource(pi);
- fail();
- } catch (IOException e) {
- // expected, ignore
- }
+ assertThrows(IOException.class, () -> cis.setSource(pi));
+
assumeExceptionOnClose(cis);
assertEquals(cis.getVisibilities().toString(), "[K]");
@@ -293,8 +273,8 @@
assertFalse(pi.hasNext());
- pi = Iterators.peekingIterator(baddata.iterator());
- cis.setSource(pi);
+ final PeekingIterator<Entry<Key,Value>> pi2 = Iterators.peekingIterator(baddata.iterator());
+ cis.setSource(pi2);
assumeExceptionOnClose(cis);
}
diff --git a/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java b/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
index cbad3cf..ce9beab 100644
--- a/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
+++ b/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
@@ -73,26 +73,27 @@
ExamplesIT.writeClientPropsFile(confFile, instance, keepers, "root", ROOT_PASSWORD);
try (AccumuloClient client = Accumulo.newClient().from(props).build()) {
client.tableOperations().create(tablename);
- BatchWriter bw = client.createBatchWriter(tablename);
- for (int i = 0; i < 10; i++) {
- Mutation m = new Mutation("" + i);
- m.put(input_cf, input_cq, "row" + i);
- bw.addMutation(m);
+ try (BatchWriter bw = client.createBatchWriter(tablename)) {
+ for (int i = 0; i < 10; i++) {
+ Mutation m = new Mutation("" + i);
+ m.put(input_cf, input_cq, "row" + i);
+ bw.addMutation(m);
+ }
}
- bw.close();
MiniAccumuloClusterImpl.ProcessInfo hash = getCluster().exec(RowHash.class,
Collections.singletonList(hadoopTmpDirArg), "-c", confFile, "-t", tablename, "--column",
input_cfcq);
assertEquals(0, hash.getProcess().waitFor());
- Scanner s = client.createScanner(tablename, Authorizations.EMPTY);
- s.fetchColumn(input_cf, output_cq);
- int i = 0;
- for (Entry<Key,Value> entry : s) {
+ try (Scanner s = client.createScanner(tablename, Authorizations.EMPTY)) {
+ s.fetchColumn(input_cf, output_cq);
+ int i = 0;
MessageDigest md = MessageDigest.getInstance("MD5");
- byte[] check = Base64.getEncoder().encode(md.digest(("row" + i).getBytes()));
- assertEquals(entry.getValue().toString(), new String(check));
- i++;
+ for (Entry<Key,Value> entry : s) {
+ byte[] check = Base64.getEncoder().encode(md.digest(("row" + i).getBytes()));
+ assertEquals(entry.getValue().toString(), new String(check));
+ i++;
+ }
}
}
}