Apply formatter changes on build
diff --git a/src/main/java/org/apache/accumulo/testing/TestEnv.java b/src/main/java/org/apache/accumulo/testing/TestEnv.java
index d36fa1a..74c0dd3 100644
--- a/src/main/java/org/apache/accumulo/testing/TestEnv.java
+++ b/src/main/java/org/apache/accumulo/testing/TestEnv.java
@@ -23,20 +23,20 @@
public TestEnv(String[] args) {
- Map<String, String> options = new HashMap<>();
+ Map<String,String> options = new HashMap<>();
List<String> arguments = new ArrayList<>();
for (int i = 0; i < args.length; i++) {
- if(args[i].equals("-o")) {
+ if (args[i].equals("-o")) {
i++;
- String[] tokens = args[i].split("=",2);
+ String[] tokens = args[i].split("=", 2);
options.put(tokens[0], tokens[1]);
} else {
arguments.add(args[i]);
}
}
- if(arguments.size() != 2) {
+ if (arguments.size() != 2) {
throw new IllegalArgumentException("Expected <testPropsPath> <clientPropsPath> arguments.");
}
@@ -46,7 +46,7 @@
this.testProps = TestProps.loadFromFile(testPropsPath);
this.clientProps = Accumulo.newClientProperties().from(clientPropsPath).build();
- options.forEach((k,v) -> testProps.setProperty(k, v));
+ options.forEach((k, v) -> testProps.setProperty(k, v));
}
public TestEnv(String testPropsPath, String clientPropsPath) {
@@ -118,8 +118,8 @@
hadoopConfig.set("fs.defaultFS", getHdfsRoot());
// Below is required due to bundled jar breaking default config.
// See http://stackoverflow.com/questions/17265002/hadoop-no-filesystem-for-scheme-file
- hadoopConfig
- .set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
+ hadoopConfig.set("fs.hdfs.impl",
+ org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
hadoopConfig.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());
hadoopConfig.set("mapreduce.framework.name", "yarn");
hadoopConfig.set("yarn.resourcemanager.hostname", getYarnResourceManager());
diff --git a/src/main/java/org/apache/accumulo/testing/cli/ClientOpts.java b/src/main/java/org/apache/accumulo/testing/cli/ClientOpts.java
index 6643d44..2689b97 100644
--- a/src/main/java/org/apache/accumulo/testing/cli/ClientOpts.java
+++ b/src/main/java/org/apache/accumulo/testing/cli/ClientOpts.java
@@ -160,8 +160,8 @@
public String getClientConfigFile() {
if (clientConfigFile == null) {
- URL clientPropsUrl = ClientOpts.class.getClassLoader().getResource(
- "accumulo-client.properties");
+ URL clientPropsUrl = ClientOpts.class.getClassLoader()
+ .getResource("accumulo-client.properties");
if (clientPropsUrl != null) {
clientConfigFile = clientPropsUrl.getFile();
}
diff --git a/src/main/java/org/apache/accumulo/testing/continuous/BulkIngest.java b/src/main/java/org/apache/accumulo/testing/continuous/BulkIngest.java
index 35ff688..4b2e20b 100644
--- a/src/main/java/org/apache/accumulo/testing/continuous/BulkIngest.java
+++ b/src/main/java/org/apache/accumulo/testing/continuous/BulkIngest.java
@@ -41,8 +41,8 @@
import org.slf4j.LoggerFactory;
/**
- * Bulk import a million random key value pairs. Same format as ContinuousIngest and can be
- * verified by running ContinuousVerify.
+ * Bulk import a million random key value pairs. Same format as ContinuousIngest and can be verified
+ * by running ContinuousVerify.
*/
public class BulkIngest extends Configured implements Tool {
public static final int NUM_KEYS = 1_000_000;
@@ -88,8 +88,8 @@
try (AccumuloClient client = env.getAccumuloClient()) {
// make sure splits file is closed before continuing
- try (PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(new Path(
- splitsFile))))) {
+ try (PrintStream out = new PrintStream(
+ new BufferedOutputStream(fs.create(new Path(splitsFile))))) {
Collection<Text> splits = client.tableOperations().listSplits(tableName, 100);
for (Text split : splits) {
out.println(Base64.getEncoder().encodeToString(split.copyBytes()));
@@ -151,8 +151,8 @@
}
@Override
- protected void map(LongWritable key, LongWritable value, Context context) throws IOException,
- InterruptedException {
+ protected void map(LongWritable key, LongWritable value, Context context)
+ throws IOException, InterruptedException {
currentRow.set(ContinuousIngest.genRow(key.get()));
// hack since we can't pass null - don't set first val (prevRow), we want it to be null
@@ -162,16 +162,16 @@
}
Key outputKey = new Key(currentRow, emptyCfCq, emptyCfCq);
- Value outputValue = ContinuousIngest.createValue(uuid.getBytes(), 0,
- currentValue.copyBytes(), null);
+ Value outputValue = ContinuousIngest.createValue(uuid.getBytes(), 0, currentValue.copyBytes(),
+ null);
context.write(outputKey, outputValue);
}
}
/**
- * Generates a million LongWritable keys. The LongWritable value points to the previous key.
- * The first key value pair has a value of 1L. This is translated to null in RandomMapper
+ * Generates a million LongWritable keys. The LongWritable value points to the previous key. The
+ * first key value pair has a value of 1L. This is translated to null in RandomMapper
*/
public static class RandomInputFormat extends InputFormat {
diff --git a/src/main/java/org/apache/accumulo/testing/continuous/ContinuousIngest.java b/src/main/java/org/apache/accumulo/testing/continuous/ContinuousIngest.java
index 957ce3e..0499b09 100644
--- a/src/main/java/org/apache/accumulo/testing/continuous/ContinuousIngest.java
+++ b/src/main/java/org/apache/accumulo/testing/continuous/ContinuousIngest.java
@@ -136,8 +136,8 @@
byte[] ingestInstanceId = UUID.randomUUID().toString().getBytes(UTF_8);
- log.info(String.format("UUID %d %s", System.currentTimeMillis(), new String(ingestInstanceId,
- UTF_8)));
+ log.info(String.format("UUID %d %s", System.currentTimeMillis(),
+ new String(ingestInstanceId, UTF_8)));
long count = 0;
final int flushInterval = getFlushEntries(env.getTestProperties());
diff --git a/src/main/java/org/apache/accumulo/testing/continuous/ContinuousOpts.java b/src/main/java/org/apache/accumulo/testing/continuous/ContinuousOpts.java
index cda9f4d..847942b 100644
--- a/src/main/java/org/apache/accumulo/testing/continuous/ContinuousOpts.java
+++ b/src/main/java/org/apache/accumulo/testing/continuous/ContinuousOpts.java
@@ -38,8 +38,8 @@
logger.setLevel(Level.TRACE);
logger.setAdditivity(false);
try {
- logger.addAppender(new FileAppender(new PatternLayout(
- "%d{dd HH:mm:ss,SSS} [%-8c{2}] %-5p: %m%n"), debugLog, true));
+ logger.addAppender(new FileAppender(
+ new PatternLayout("%d{dd HH:mm:ss,SSS} [%-8c{2}] %-5p: %m%n"), debugLog, true));
} catch (IOException ex) {
throw new RuntimeException(ex);
}
diff --git a/src/main/java/org/apache/accumulo/testing/continuous/ContinuousScanner.java b/src/main/java/org/apache/accumulo/testing/continuous/ContinuousScanner.java
index 9fd4a80..d453ec7 100644
--- a/src/main/java/org/apache/accumulo/testing/continuous/ContinuousScanner.java
+++ b/src/main/java/org/apache/accumulo/testing/continuous/ContinuousScanner.java
@@ -88,7 +88,7 @@
}
// System.out.println("P2 "+delta
- // +" "+numToScan+" "+distance+" "+((double)numToScan/count ));
+ // +" "+numToScan+" "+distance+" "+((double)numToScan/count ));
}
System.out.printf("SCN %d %s %d %d%n", t1, new String(scanStart, UTF_8), (t2 - t1), count);
diff --git a/src/main/java/org/apache/accumulo/testing/continuous/ContinuousVerify.java b/src/main/java/org/apache/accumulo/testing/continuous/ContinuousVerify.java
index 7604dda..880643d 100644
--- a/src/main/java/org/apache/accumulo/testing/continuous/ContinuousVerify.java
+++ b/src/main/java/org/apache/accumulo/testing/continuous/ContinuousVerify.java
@@ -145,14 +145,14 @@
String tableName = env.getAccumuloTableName();
- Job job = Job.getInstance(getConf(), this.getClass().getSimpleName() + "_" + tableName + "_"
- + System.currentTimeMillis());
+ Job job = Job.getInstance(getConf(),
+ this.getClass().getSimpleName() + "_" + tableName + "_" + System.currentTimeMillis());
job.setJarByClass(this.getClass());
job.setInputFormatClass(AccumuloInputFormat.class);
- boolean scanOffline = Boolean.parseBoolean(env
- .getTestProperty(TestProps.CI_VERIFY_SCAN_OFFLINE));
+ boolean scanOffline = Boolean
+ .parseBoolean(env.getTestProperty(TestProps.CI_VERIFY_SCAN_OFFLINE));
int maxMaps = Integer.parseInt(env.getTestProperty(TestProps.CI_VERIFY_MAX_MAPS));
int reducers = Integer.parseInt(env.getTestProperty(TestProps.CI_VERIFY_REDUCERS));
String outputDir = env.getTestProperty(TestProps.CI_VERIFY_OUTPUT_DIR);
diff --git a/src/main/java/org/apache/accumulo/testing/continuous/CreateTable.java b/src/main/java/org/apache/accumulo/testing/continuous/CreateTable.java
index 9373144..fc3440a 100644
--- a/src/main/java/org/apache/accumulo/testing/continuous/CreateTable.java
+++ b/src/main/java/org/apache/accumulo/testing/continuous/CreateTable.java
@@ -39,8 +39,8 @@
System.exit(-1);
}
- int numTablets = Integer.parseInt(env
- .getTestProperty(TestProps.CI_COMMON_ACCUMULO_NUM_TABLETS));
+ int numTablets = Integer
+ .parseInt(env.getTestProperty(TestProps.CI_COMMON_ACCUMULO_NUM_TABLETS));
if (numTablets < 1) {
System.err.println("ERROR: numTablets < 1");
@@ -70,8 +70,8 @@
client.tableOperations().create(tableName, ntc);
- System.out.println("Created Accumulo table '" + tableName + "' with " + numTablets
- + " tablets");
+ System.out
+ .println("Created Accumulo table '" + tableName + "' with " + numTablets + " tablets");
}
}
diff --git a/src/main/java/org/apache/accumulo/testing/continuous/TimeBinner.java b/src/main/java/org/apache/accumulo/testing/continuous/TimeBinner.java
index 48b009f..83e42aa 100644
--- a/src/main/java/org/apache/accumulo/testing/continuous/TimeBinner.java
+++ b/src/main/java/org/apache/accumulo/testing/continuous/TimeBinner.java
@@ -35,7 +35,13 @@
public class TimeBinner {
enum Operation {
- AVG, SUM, MIN, MAX, COUNT, CUMULATIVE, AMM, // avg,min,max
+ AVG,
+ SUM,
+ MIN,
+ MAX,
+ COUNT,
+ CUMULATIVE,
+ AMM, // avg,min,max
AMM_HACK1 // special case
}
diff --git a/src/main/java/org/apache/accumulo/testing/continuous/UndefinedAnalyzer.java b/src/main/java/org/apache/accumulo/testing/continuous/UndefinedAnalyzer.java
index 6920a7a..1c61bdb 100644
--- a/src/main/java/org/apache/accumulo/testing/continuous/UndefinedAnalyzer.java
+++ b/src/main/java/org/apache/accumulo/testing/continuous/UndefinedAnalyzer.java
@@ -16,6 +16,8 @@
*/
package org.apache.accumulo.testing.continuous;
+import static java.nio.charset.StandardCharsets.UTF_8;
+
import java.io.BufferedReader;
import java.io.File;
import java.io.InputStreamReader;
@@ -31,7 +33,6 @@
import java.util.Map.Entry;
import java.util.TreeMap;
-import com.beust.jcommander.Parameter;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.data.Key;
@@ -40,7 +41,7 @@
import org.apache.accumulo.testing.cli.ClientOpts;
import org.apache.hadoop.io.Text;
-import static java.nio.charset.StandardCharsets.UTF_8;
+import com.beust.jcommander.Parameter;
/**
* BUGS This code does not handle the fact that these files could include log events from previous
@@ -197,14 +198,16 @@
if (pos1 > 0 && pos2 > 0 && pos3 == -1) {
String tid = tablet.substring(0, pos1);
- String endRow = tablet.charAt(pos1) == '<' ? "8000000000000000" : tablet.substring(pos1 + 1, pos2);
+ String endRow = tablet.charAt(pos1) == '<' ? "8000000000000000"
+ : tablet.substring(pos1 + 1, pos2);
String prevEndRow = tablet.charAt(pos2) == '<' ? "" : tablet.substring(pos2 + 1);
if (tid.equals(tableId)) {
// System.out.println(" "+server+" "+tid+" "+endRow+" "+prevEndRow);
Date date = sdf.parse(day + " " + time);
// System.out.println(" "+date);
- assignments.add(new TabletAssignment(tablet, endRow, prevEndRow, server, date.getTime()));
+ assignments.add(
+ new TabletAssignment(tablet, endRow, prevEndRow, server, date.getTime()));
}
} else if (!tablet.startsWith("!0")) {
@@ -262,7 +265,7 @@
}
try (AccumuloClient client = opts.createClient();
- BatchScanner bscanner = client.createBatchScanner(opts.tableName, opts.auths)) {
+ BatchScanner bscanner = client.createBatchScanner(opts.tableName, opts.auths)) {
List<Range> refs = new ArrayList<>();
for (UndefinedNode undefinedNode : undefs)
@@ -270,9 +273,9 @@
bscanner.setRanges(refs);
- HashMap<String, List<String>> refInfo = new HashMap<>();
+ HashMap<String,List<String>> refInfo = new HashMap<>();
- for (Entry<Key, Value> entry : bscanner) {
+ for (Entry<Key,Value> entry : bscanner) {
String ref = entry.getKey().getRow().toString();
List<String> vals = refInfo.computeIfAbsent(ref, k -> new ArrayList<>());
vals.add(entry.getValue().toString());
@@ -313,9 +316,11 @@
}
if (ta == null)
- System.out.println(undefinedNode.undef + " " + undefinedNode.ref + " " + uuid + " " + t1 + " " + t2);
+ System.out.println(
+ undefinedNode.undef + " " + undefinedNode.ref + " " + uuid + " " + t1 + " " + t2);
else
- System.out.println(undefinedNode.undef + " " + undefinedNode.ref + " " + ta.tablet + " " + ta.server + " " + uuid + " " + t1 + " " + t2);
+ System.out.println(undefinedNode.undef + " " + undefinedNode.ref + " " + ta.tablet
+ + " " + ta.server + " " + uuid + " " + t1 + " " + t2);
}
} else {
diff --git a/src/main/java/org/apache/accumulo/testing/ingest/BulkImportDirectory.java b/src/main/java/org/apache/accumulo/testing/ingest/BulkImportDirectory.java
index 1cd0a39..a75364c 100644
--- a/src/main/java/org/apache/accumulo/testing/ingest/BulkImportDirectory.java
+++ b/src/main/java/org/apache/accumulo/testing/ingest/BulkImportDirectory.java
@@ -39,16 +39,16 @@
String failures = null;
}
- public static void main(String[] args) throws IOException, AccumuloException,
- AccumuloSecurityException, TableNotFoundException {
+ public static void main(String[] args)
+ throws IOException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
final FileSystem fs = FileSystem.get(new Configuration());
Opts opts = new Opts();
- System.err
- .println("Deprecated syntax for BulkImportDirectory, please use the new style (see --help)");
+ System.err.println(
+ "Deprecated syntax for BulkImportDirectory, please use the new style (see --help)");
opts.parseArgs(BulkImportDirectory.class.getName(), args);
fs.delete(new Path(opts.failures), true);
fs.mkdirs(new Path(opts.failures));
- opts.createClient().tableOperations()
- .importDirectory(opts.tableName, opts.source, opts.failures, false);
+ opts.createClient().tableOperations().importDirectory(opts.tableName, opts.source,
+ opts.failures, false);
}
}
diff --git a/src/main/java/org/apache/accumulo/testing/ingest/TestIngest.java b/src/main/java/org/apache/accumulo/testing/ingest/TestIngest.java
index a7d392c..a478659 100644
--- a/src/main/java/org/apache/accumulo/testing/ingest/TestIngest.java
+++ b/src/main/java/org/apache/accumulo/testing/ingest/TestIngest.java
@@ -31,9 +31,9 @@
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.security.SecurityErrorCode;
import org.apache.accumulo.core.client.rfile.RFile;
import org.apache.accumulo.core.client.rfile.RFileWriter;
+import org.apache.accumulo.core.client.security.SecurityErrorCode;
import org.apache.accumulo.core.clientImpl.TabletServerBatchWriter;
import org.apache.accumulo.core.data.ConstraintViolationSummary;
import org.apache.accumulo.core.data.Key;
@@ -76,8 +76,7 @@
@Parameter(names = "--cols", description = "the number of columns to ingest per row")
public int cols = 1;
- @Parameter(
- names = "--random",
+ @Parameter(names = "--random",
description = "insert random rows and use the given number to seed the psuedo-random number generator")
public Integer random = null;
@@ -108,8 +107,8 @@
public FileSystem fs = null;
}
- public static void createTable(AccumuloClient client, Opts args) throws AccumuloException,
- AccumuloSecurityException, TableExistsException {
+ public static void createTable(AccumuloClient client, Opts args)
+ throws AccumuloException, AccumuloSecurityException, TableExistsException {
if (args.createTable) {
TreeSet<Text> splits = getSplitPoints(args.startRow, args.startRow + args.rows,
args.numsplits);
@@ -256,8 +255,8 @@
} else {
byte value[];
if (opts.random != null) {
- value = genRandomValue(random, randomValue, opts.random.intValue(), rowid
- + opts.startRow, j);
+ value = genRandomValue(random, randomValue, opts.random.intValue(),
+ rowid + opts.startRow, j);
} else {
value = bytevals[j % bytevals.length];
}
@@ -279,8 +278,8 @@
} else {
byte value[];
if (opts.random != null) {
- value = genRandomValue(random, randomValue, opts.random.intValue(), rowid
- + opts.startRow, j);
+ value = genRandomValue(random, randomValue, opts.random.intValue(),
+ rowid + opts.startRow, j);
} else {
value = bytevals[j % bytevals.length];
}
@@ -305,9 +304,10 @@
bw.close();
} catch (MutationsRejectedException e) {
if (e.getSecurityErrorCodes().size() > 0) {
- for (Entry<TabletId,Set<SecurityErrorCode>> entry : e.getSecurityErrorCodes().entrySet()) {
- System.err.println("ERROR : Not authorized to write to : " + entry.getKey()
- + " due to " + entry.getValue());
+ for (Entry<TabletId,Set<SecurityErrorCode>> entry : e.getSecurityErrorCodes()
+ .entrySet()) {
+ System.err.println("ERROR : Not authorized to write to : " + entry.getKey() + " due to "
+ + entry.getValue());
}
}
@@ -325,11 +325,10 @@
int totalValues = opts.rows * opts.cols;
double elapsed = (stopTime - startTime) / 1000.0;
- System.out
- .printf(
- "%,12d records written | %,8d records/sec | %,12d bytes written | %,8d bytes/sec | %6.3f secs %n",
- totalValues, (int) (totalValues / elapsed), bytesWritten,
- (int) (bytesWritten / elapsed), elapsed);
+ System.out.printf(
+ "%,12d records written | %,8d records/sec | %,12d bytes written | %,8d bytes/sec | %6.3f secs %n",
+ totalValues, (int) (totalValues / elapsed), bytesWritten, (int) (bytesWritten / elapsed),
+ elapsed);
}
public static void ingest(AccumuloClient c, Opts opts, Configuration conf)
diff --git a/src/main/java/org/apache/accumulo/testing/ingest/VerifyIngest.java b/src/main/java/org/apache/accumulo/testing/ingest/VerifyIngest.java
index db7e828..9b01bc1 100644
--- a/src/main/java/org/apache/accumulo/testing/ingest/VerifyIngest.java
+++ b/src/main/java/org/apache/accumulo/testing/ingest/VerifyIngest.java
@@ -75,8 +75,8 @@
}
}
- private static void verifyIngest(AccumuloClient client, Opts opts) throws AccumuloException,
- AccumuloSecurityException, TableNotFoundException {
+ private static void verifyIngest(AccumuloClient client, Opts opts)
+ throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
byte[][] bytevals = TestIngest.generateValues(opts.dataSize);
Authorizations labelAuths = new Authorizations("L1", "L2", "G1", "GROUP2");
@@ -119,8 +119,8 @@
byte ev[];
if (opts.random != null) {
- ev = TestIngest
- .genRandomValue(random, randomValue, opts.random, expectedRow, expectedCol);
+ ev = TestIngest.genRandomValue(random, randomValue, opts.random, expectedRow,
+ expectedCol);
} else {
ev = bytevals[expectedCol % bytevals.length];
}
@@ -175,23 +175,23 @@
}
if (colNum != expectedCol) {
- log.error("colNum != expectedCol " + colNum + " != " + expectedCol + " rowNum : "
- + rowNum);
+ log.error(
+ "colNum != expectedCol " + colNum + " != " + expectedCol + " rowNum : " + rowNum);
errors++;
}
if (expectedRow >= (opts.rows + opts.startRow)) {
- log.error("expectedRow (" + expectedRow
- + ") >= (ingestArgs.rows + ingestArgs.startRow) (" + (opts.rows + opts.startRow)
- + "), get batch returned data passed end key");
+ log.error(
+ "expectedRow (" + expectedRow + ") >= (ingestArgs.rows + ingestArgs.startRow) ("
+ + (opts.rows + opts.startRow) + "), get batch returned data passed end key");
errors++;
break;
}
byte value[];
if (opts.random != null) {
- value = TestIngest
- .genRandomValue(random, randomValue, opts.random, expectedRow, colNum);
+ value = TestIngest.genRandomValue(random, randomValue, opts.random, expectedRow,
+ colNum);
} else {
value = bytevals[colNum % bytevals.length];
}
@@ -235,11 +235,10 @@
throw new AccumuloException("Did not read expected number of rows. Saw "
+ (expectedRow - opts.startRow) + " expected " + opts.rows);
} else {
- System.out
- .printf(
- "%,12d records read | %,8d records/sec | %,12d bytes read | %,8d bytes/sec | %6.3f secs %n",
- recsRead, (int) ((recsRead) / ((t2 - t1) / 1000.0)), bytesRead,
- (int) (bytesRead / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0);
+ System.out.printf(
+ "%,12d records read | %,8d records/sec | %,12d bytes read | %,8d bytes/sec | %6.3f secs %n",
+ recsRead, (int) ((recsRead) / ((t2 - t1) / 1000.0)), bytesRead,
+ (int) (bytesRead / ((t2 - t1) / 1000.0)), (t2 - t1) / 1000.0);
}
}
diff --git a/src/main/java/org/apache/accumulo/testing/mapreduce/RowHash.java b/src/main/java/org/apache/accumulo/testing/mapreduce/RowHash.java
index 086025c..f3a7328 100644
--- a/src/main/java/org/apache/accumulo/testing/mapreduce/RowHash.java
+++ b/src/main/java/org/apache/accumulo/testing/mapreduce/RowHash.java
@@ -20,17 +20,14 @@
import java.util.Base64;
import java.util.Collection;
import java.util.Collections;
-import java.util.List;
-import java.util.Set;
import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
-import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
-import org.apache.accumulo.hadoopImpl.mapreduce.lib.MapReduceClientOnRequiredTable;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
+import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
+import org.apache.accumulo.hadoopImpl.mapreduce.lib.MapReduceClientOnRequiredTable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.MD5Hash;
@@ -50,8 +47,8 @@
@Override
public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
Mutation m = new Mutation(row.getRow());
- m.put(new Text("cf-HASHTYPE"), new Text("cq-MD5BASE64"), new Value(Base64.getEncoder()
- .encode(MD5Hash.digest(data.toString()).getDigest())));
+ m.put(new Text("cf-HASHTYPE"), new Text("cq-MD5BASE64"),
+ new Value(Base64.getEncoder().encode(MD5Hash.digest(data.toString()).getDigest())));
context.write(null, m);
context.progress();
}
diff --git a/src/main/java/org/apache/accumulo/testing/mapreduce/TeraSortIngest.java b/src/main/java/org/apache/accumulo/testing/mapreduce/TeraSortIngest.java
index b4ae57c..e2fa4f5 100644
--- a/src/main/java/org/apache/accumulo/testing/mapreduce/TeraSortIngest.java
+++ b/src/main/java/org/apache/accumulo/testing/mapreduce/TeraSortIngest.java
@@ -27,9 +27,9 @@
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.mapreduce.AccumuloOutputFormat;
-import org.apache.accumulo.hadoopImpl.mapreduce.lib.MapReduceClientOnRequiredTable;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.hadoopImpl.mapreduce.lib.MapReduceClientOnRequiredTable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.LongWritable;
@@ -141,8 +141,8 @@
}
@Override
- public void initialize(InputSplit split, TaskAttemptContext context) throws IOException,
- InterruptedException {}
+ public void initialize(InputSplit split, TaskAttemptContext context)
+ throws IOException, InterruptedException {}
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
@@ -169,8 +169,8 @@
long totalRows = job.getConfiguration().getLong(NUMROWS, 0);
int numSplits = job.getConfiguration().getInt(NUMSPLITS, 1);
long rowsPerSplit = totalRows / numSplits;
- System.out.println("Generating " + totalRows + " using " + numSplits + " maps with step of "
- + rowsPerSplit);
+ System.out.println(
+ "Generating " + totalRows + " using " + numSplits + " maps with step of " + rowsPerSplit);
ArrayList<InputSplit> splits = new ArrayList<>(numSplits);
long currentRow = 0;
for (int split = 0; split < numSplits - 1; ++split) {
@@ -318,8 +318,8 @@
}
@Override
- public void map(LongWritable row, NullWritable ignored, Context context) throws IOException,
- InterruptedException {
+ public void map(LongWritable row, NullWritable ignored, Context context)
+ throws IOException, InterruptedException {
context.setStatus("Entering");
long rowId = row.get();
if (rand == null) {
diff --git a/src/main/java/org/apache/accumulo/testing/merkle/MerkleTree.java b/src/main/java/org/apache/accumulo/testing/merkle/MerkleTree.java
index bf384b8..cc7f810 100644
--- a/src/main/java/org/apache/accumulo/testing/merkle/MerkleTree.java
+++ b/src/main/java/org/apache/accumulo/testing/merkle/MerkleTree.java
@@ -46,8 +46,9 @@
Pair<Integer,Integer> pairToJoin = findNextPair(buffer);
// Make a parent node from them
- MerkleTreeNode parent = new MerkleTreeNode(Arrays.asList(buffer.get(pairToJoin.getFirst()),
- buffer.get(pairToJoin.getSecond())), digestAlgorithm);
+ MerkleTreeNode parent = new MerkleTreeNode(
+ Arrays.asList(buffer.get(pairToJoin.getFirst()), buffer.get(pairToJoin.getSecond())),
+ digestAlgorithm);
// Insert it back into the "tree" at the position of the first child
buffer.set(pairToJoin.getFirst(), parent);
diff --git a/src/main/java/org/apache/accumulo/testing/merkle/MerkleTreeNode.java b/src/main/java/org/apache/accumulo/testing/merkle/MerkleTreeNode.java
index a83e13c..52b3684 100644
--- a/src/main/java/org/apache/accumulo/testing/merkle/MerkleTreeNode.java
+++ b/src/main/java/org/apache/accumulo/testing/merkle/MerkleTreeNode.java
@@ -71,12 +71,12 @@
if (null == childrenRange) {
childrenRange = child.getRange();
} else {
- List<Range> overlappingRanges = Range.mergeOverlapping(Arrays.asList(childrenRange,
- child.getRange()));
+ List<Range> overlappingRanges = Range
+ .mergeOverlapping(Arrays.asList(childrenRange, child.getRange()));
if (1 != overlappingRanges.size()) {
log.error("Tried to merge non-contiguous ranges: {} {}", childrenRange, child.getRange());
- throw new IllegalArgumentException("Ranges must be contiguous: " + childrenRange + ", "
- + child.getRange());
+ throw new IllegalArgumentException(
+ "Ranges must be contiguous: " + childrenRange + ", " + child.getRange());
}
childrenRange = overlappingRanges.get(0);
diff --git a/src/main/java/org/apache/accumulo/testing/merkle/cli/CompareTables.java b/src/main/java/org/apache/accumulo/testing/merkle/cli/CompareTables.java
index 70c1469..3d1111e 100644
--- a/src/main/java/org/apache/accumulo/testing/merkle/cli/CompareTables.java
+++ b/src/main/java/org/apache/accumulo/testing/merkle/cli/CompareTables.java
@@ -24,9 +24,9 @@
import java.util.Map;
import java.util.Map.Entry;
+import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.AccumuloException;
import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Range;
@@ -95,9 +95,9 @@
this.opts = opts;
}
- private Map<String,String> computeAllHashes() throws AccumuloException,
- AccumuloSecurityException, TableExistsException, NoSuchAlgorithmException,
- TableNotFoundException, FileNotFoundException {
+ private Map<String,String> computeAllHashes()
+ throws AccumuloException, AccumuloSecurityException, TableExistsException,
+ NoSuchAlgorithmException, TableNotFoundException, FileNotFoundException {
try (AccumuloClient client = opts.createClient()) {
final Map<String,String> hashesByTable = new HashMap<>();
@@ -105,8 +105,8 @@
final String outputTableName = table + "_merkle";
if (client.tableOperations().exists(outputTableName)) {
- throw new IllegalArgumentException("Expected output table name to not yet exist: "
- + outputTableName);
+ throw new IllegalArgumentException(
+ "Expected output table name to not yet exist: " + outputTableName);
}
client.tableOperations().create(outputTableName);
@@ -123,8 +123,8 @@
}
ComputeRootHash computeRootHash = new ComputeRootHash();
- String hash = Hex.encodeHexString(computeRootHash.getHash(client, outputTableName,
- opts.getHashName()));
+ String hash = Hex
+ .encodeHexString(computeRootHash.getHash(client, outputTableName, opts.getHashName()));
hashesByTable.put(table, hash);
}
diff --git a/src/main/java/org/apache/accumulo/testing/merkle/cli/ComputeRootHash.java b/src/main/java/org/apache/accumulo/testing/merkle/cli/ComputeRootHash.java
index c08bd3d..7d06a6e 100644
--- a/src/main/java/org/apache/accumulo/testing/merkle/cli/ComputeRootHash.java
+++ b/src/main/java/org/apache/accumulo/testing/merkle/cli/ComputeRootHash.java
@@ -51,8 +51,8 @@
String hashName;
}
- private byte[] getHash(ComputeRootHashOpts opts) throws TableNotFoundException,
- NoSuchAlgorithmException {
+ private byte[] getHash(ComputeRootHashOpts opts)
+ throws TableNotFoundException, NoSuchAlgorithmException {
try (AccumuloClient client = opts.createClient()) {
return getHash(client, opts.tableName, opts.hashName);
}
diff --git a/src/main/java/org/apache/accumulo/testing/merkle/cli/GenerateHashes.java b/src/main/java/org/apache/accumulo/testing/merkle/cli/GenerateHashes.java
index ae8931c..9b21037 100644
--- a/src/main/java/org/apache/accumulo/testing/merkle/cli/GenerateHashes.java
+++ b/src/main/java/org/apache/accumulo/testing/merkle/cli/GenerateHashes.java
@@ -135,8 +135,8 @@
}
}
- public void run(GenerateHashesOpts opts) throws TableNotFoundException,
- AccumuloSecurityException, AccumuloException, NoSuchAlgorithmException, FileNotFoundException {
+ public void run(GenerateHashesOpts opts) throws TableNotFoundException, AccumuloSecurityException,
+ AccumuloException, NoSuchAlgorithmException, FileNotFoundException {
try (AccumuloClient client = opts.createClient()) {
Collection<Range> ranges = getRanges(client, opts.tableName, opts.getSplitsFile());
run(client, opts.tableName, opts.getOutputTableName(), opts.getHashName(),
@@ -144,9 +144,10 @@
}
}
- public void run(final AccumuloClient client, final String inputTableName, final String outputTableName, final String digestName, int numThreads,
- final boolean iteratorPushdown, final Collection<Range> ranges) throws TableNotFoundException, AccumuloException,
- NoSuchAlgorithmException {
+ public void run(final AccumuloClient client, final String inputTableName,
+ final String outputTableName, final String digestName, int numThreads,
+ final boolean iteratorPushdown, final Collection<Range> ranges)
+ throws TableNotFoundException, AccumuloException, NoSuchAlgorithmException {
if (!client.tableOperations().exists(outputTableName)) {
throw new IllegalArgumentException(outputTableName + " does not exist, please create it");
}
@@ -203,7 +204,8 @@
}
// Log some progress
- log.info("{} computed digest for {} of {}", Thread.currentThread().getName(), range, Hex.encodeHexString(v.get()));
+ log.info("{} computed digest for {} of {}", Thread.currentThread().getName(), range,
+ Hex.encodeHexString(v.get()));
try {
bw.addMutation(m);
@@ -221,7 +223,8 @@
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
- log.error("Interrupted while waiting for executor service to gracefully complete. Exiting now");
+ log.error(
+ "Interrupted while waiting for executor service to gracefully complete. Exiting now");
svc.shutdownNow();
return;
}
diff --git a/src/main/java/org/apache/accumulo/testing/merkle/cli/ManualComparison.java b/src/main/java/org/apache/accumulo/testing/merkle/cli/ManualComparison.java
index ef4c97e..49129fe 100644
--- a/src/main/java/org/apache/accumulo/testing/merkle/cli/ManualComparison.java
+++ b/src/main/java/org/apache/accumulo/testing/merkle/cli/ManualComparison.java
@@ -24,9 +24,9 @@
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.testing.cli.ClientOpts;
import com.beust.jcommander.Parameter;
-import org.apache.accumulo.testing.cli.ClientOpts;
/**
* Accepts two table names and enumerates all key-values pairs in both checking for correctness. All
diff --git a/src/main/java/org/apache/accumulo/testing/merkle/package-info.java b/src/main/java/org/apache/accumulo/testing/merkle/package-info.java
index 2469d6f..6b5c7c2 100644
--- a/src/main/java/org/apache/accumulo/testing/merkle/package-info.java
+++ b/src/main/java/org/apache/accumulo/testing/merkle/package-info.java
@@ -15,25 +15,28 @@
* limitations under the License.
*/
/**
- * A <a href="http://en.wikipedia.org/wiki/Merkle_tree">Merkle tree</a> is a hash tree and can be used to evaluate equality over large
- * files with the ability to ascertain what portions of the files differ. Each leaf of the Merkle tree is some hash of a
- * portion of the file, with each leaf corresponding to some "range" within the source file. As such, if all leaves are
- * considered as ranges of the source file, the "sum" of all leaves creates a contiguous range over the entire file.
+ * A <a href="http://en.wikipedia.org/wiki/Merkle_tree">Merkle tree</a> is a hash tree and can be
+ * used to evaluate equality over large files with the ability to ascertain what portions of the
+ * files differ. Each leaf of the Merkle tree is some hash of a portion of the file, with each leaf
+ * corresponding to some "range" within the source file. As such, if all leaves are considered as
+ * ranges of the source file, the "sum" of all leaves creates a contiguous range over the entire
+ * file.
* <p>
- * The parent of any nodes (typically, a binary tree; however this is not required) is the concatenation of the hashes of
- * the children. We can construct a full tree by walking up the tree, creating parents from children, until we have a root
- * node. To check equality of two files that each have a merkle tree built, we can very easily compare the value of at the
- * root of the Merkle tree to know whether or not the files are the same.
+ * The parent of any nodes (typically, a binary tree; however this is not required) is the
+ * concatenation of the hashes of the children. We can construct a full tree by walking up the tree,
+ * creating parents from children, until we have a root node. To check equality of two files that
+ * each have a Merkle tree built, we can very easily compare the value at the root of the Merkle
+ * tree to know whether or not the files are the same.
* <p>
- * Additionally, in the situation where we have two files with we expect to be the same but are not, we can walk back down
- * the tree, finding subtrees that are equal and subtrees that are not. Subtrees that are equal correspond to portions of
- * the files which are identical, where subtrees that are not equal correspond to discrepancies between the two files.
+ * Additionally, in the situation where we have two files which we expect to be the same but are not,
+ * we can walk back down the tree, finding subtrees that are equal and subtrees that are not.
+ * Subtrees that are equal correspond to portions of the files which are identical, where subtrees
+ * that are not equal correspond to discrepancies between the two files.
* <p>
- * We can apply this concept to Accumulo, treating a table as a file, and ranges within a file as an Accumulo Range. We can
- * then compute the hashes over each of these Ranges and compute the entire Merkle tree to determine if two tables are
- * equivalent.
+ * We can apply this concept to Accumulo, treating a table as a file, and ranges within a file as an
+ * Accumulo Range. We can then compute the hashes over each of these Ranges and compute the entire
+ * Merkle tree to determine if two tables are equivalent.
*
* @since 1.7.0
*/
package org.apache.accumulo.testing.merkle;
-
diff --git a/src/main/java/org/apache/accumulo/testing/performance/Report.java b/src/main/java/org/apache/accumulo/testing/performance/Report.java
index a8bb1c8..f80a80b 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/Report.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/Report.java
@@ -56,8 +56,8 @@
}
public Builder result(String id, LongSummaryStatistics stats, String description) {
- results.add(new Result(id, new Stats(stats.getMin(), stats.getMax(), stats.getSum(), stats
- .getAverage(), stats.getCount()), description, Purpose.COMPARISON));
+ results.add(new Result(id, new Stats(stats.getMin(), stats.getMax(), stats.getSum(),
+ stats.getAverage(), stats.getCount()), description, Purpose.COMPARISON));
return this;
}
@@ -72,8 +72,8 @@
}
public Builder info(String id, LongSummaryStatistics stats, String description) {
- results.add(new Result(id, new Stats(stats.getMin(), stats.getMax(), stats.getSum(), stats
- .getAverage(), stats.getCount()), description, Purpose.INFORMATIONAL));
+ results.add(new Result(id, new Stats(stats.getMin(), stats.getMax(), stats.getSum(),
+ stats.getAverage(), stats.getCount()), description, Purpose.INFORMATIONAL));
return this;
}
diff --git a/src/main/java/org/apache/accumulo/testing/performance/impl/Csv.java b/src/main/java/org/apache/accumulo/testing/performance/impl/Csv.java
index a371d11..2af9e1c 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/impl/Csv.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/impl/Csv.java
@@ -79,24 +79,27 @@
}
public static void main(String[] args) throws Exception {
- Map<RowId, Map<String, Double>> rows = new TreeMap<>();
+ Map<RowId,Map<String,Double>> rows = new TreeMap<>();
for (String file : args) {
Collection<ContextualReport> reports = Compare.readReports(file);
- Instant minStart = reports.stream().map(cr -> cr.startTime).map(Instant::parse).min(Instant::compareTo).get();
+ Instant minStart = reports.stream().map(cr -> cr.startTime).map(Instant::parse)
+ .min(Instant::compareTo).get();
- String version = Iterables.getOnlyElement(reports.stream().map(cr -> cr.accumuloVersion).collect(toSet()));
+ String version = Iterables
+ .getOnlyElement(reports.stream().map(cr -> cr.accumuloVersion).collect(toSet()));
- Map<String, Double> row = new HashMap<>();
+ Map<String,Double> row = new HashMap<>();
for (ContextualReport report : reports) {
- String id = report.id != null ? report.id : report.testClass.substring(report.testClass.lastIndexOf('.')+1);
+ String id = report.id != null ? report.id
+ : report.testClass.substring(report.testClass.lastIndexOf('.') + 1);
for (Result result : report.results) {
- if(result.purpose == Result.Purpose.COMPARISON) {
- row.put(id+"."+result.id, result.data.doubleValue());
+ if (result.purpose == Result.Purpose.COMPARISON) {
+ row.put(id + "." + result.id, result.data.doubleValue());
}
}
}
@@ -104,12 +107,14 @@
rows.put(new RowId(minStart, version), row);
}
- List<String> allCols = rows.values().stream().flatMap(row -> row.keySet().stream()).distinct().sorted().collect(toList());
+ List<String> allCols = rows.values().stream().flatMap(row -> row.keySet().stream()).distinct()
+ .sorted().collect(toList());
// print header
- print(Stream.concat(Stream.of("Start Time","Version"), allCols.stream()).collect(joining(",")));
+ print(
+ Stream.concat(Stream.of("Start Time", "Version"), allCols.stream()).collect(joining(",")));
- rows.forEach((id, row)->{
+ rows.forEach((id, row) -> {
StringJoiner joiner = new StringJoiner(",");
joiner.add(id.startTime.toString());
joiner.add(id.accumuloVersion);
diff --git a/src/main/java/org/apache/accumulo/testing/performance/impl/MergeSiteConfig.java b/src/main/java/org/apache/accumulo/testing/performance/impl/MergeSiteConfig.java
index 0e98f85..f01afdd 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/impl/MergeSiteConfig.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/impl/MergeSiteConfig.java
@@ -31,17 +31,18 @@
String className = args[0];
Path confFile = Paths.get(args[1], "accumulo.properties");
- PerformanceTest perfTest = Class.forName(className).asSubclass(PerformanceTest.class).newInstance();
+ PerformanceTest perfTest = Class.forName(className).asSubclass(PerformanceTest.class)
+ .newInstance();
Properties props = new Properties();
- try(Reader in = Files.newBufferedReader(confFile)){
+ try (Reader in = Files.newBufferedReader(confFile)) {
props.load(in);
}
- perfTest.getSystemConfig().getAccumuloConfig().forEach((k,v) -> props.setProperty(k, v));
+ perfTest.getSystemConfig().getAccumuloConfig().forEach((k, v) -> props.setProperty(k, v));
- try(Writer out = Files.newBufferedWriter(confFile)){
+ try (Writer out = Files.newBufferedWriter(confFile)) {
props.store(out, "Modified by performance test");
}
}
diff --git a/src/main/java/org/apache/accumulo/testing/performance/impl/PerfTestRunner.java b/src/main/java/org/apache/accumulo/testing/performance/impl/PerfTestRunner.java
index 2f230cd..c85b267 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/impl/PerfTestRunner.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/impl/PerfTestRunner.java
@@ -63,8 +63,8 @@
DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyyMMddHHmmss");
String time = Instant.now().atZone(ZoneId.systemDefault()).format(formatter);
- Path outputFile = Paths.get(outputDir, perfTest.getClass().getSimpleName() + "_" + time
- + ".json");
+ Path outputFile = Paths.get(outputDir,
+ perfTest.getClass().getSimpleName() + "_" + time + ".json");
try (Writer writer = Files.newBufferedWriter(outputFile)) {
gson.toJson(report, writer);
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/ProbabilityFilter.java b/src/main/java/org/apache/accumulo/testing/performance/tests/ProbabilityFilter.java
index 5c37201..6924f5b 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/ProbabilityFilter.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/ProbabilityFilter.java
@@ -9,9 +9,9 @@
public class ProbabilityFilter extends YieldingFilter {
@Override
- protected BiPredicate<Key, Value> createPredicate(Map<String,String> options) {
+ protected BiPredicate<Key,Value> createPredicate(Map<String,String> options) {
double matchProbability = Double.parseDouble(options.get("probability"));
Random rand = new Random();
- return (k,v) -> rand.nextDouble() < matchProbability;
+ return (k, v) -> rand.nextDouble() < matchProbability;
}
}
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/RandomCachedLookupsPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/RandomCachedLookupsPT.java
index 8fcd200..c09fe2e 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/RandomCachedLookupsPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/RandomCachedLookupsPT.java
@@ -90,8 +90,8 @@
long d128 = doLookups(env.getClient(), 128, NUM_LOOKUPS_PER_THREAD);
reportBuilder.id("smalls");
- reportBuilder
- .description("Runs multiple threads each doing lots of small random scans. For this test data and index cache are enabled.");
+ reportBuilder.description(
+ "Runs multiple threads each doing lots of small random scans. For this test data and index cache are enabled.");
reportBuilder.info("warmup", 32 * NUM_LOOKUPS_PER_THREAD, warmup,
"Random lookup per sec for 32 threads");
reportBuilder.info("lookups_1", NUM_LOOKUPS_PER_THREAD, d1,
@@ -127,9 +127,11 @@
return reportBuilder.build();
}
- public static void writeData(Report.Builder reportBuilder, AccumuloClient client, int numRows) throws Exception {
+ public static void writeData(Report.Builder reportBuilder, AccumuloClient client, int numRows)
+ throws Exception {
- reportBuilder.parameter("rows", numRows, "Number of random rows written. Each row has 4 columns.");
+ reportBuilder.parameter("rows", numRows,
+ "Number of random rows written. Each row has 4 columns.");
NewTableConfiguration ntc = new NewTableConfiguration();
Map<String,String> props = new HashMap<>();
@@ -150,7 +152,8 @@
long t2 = System.currentTimeMillis();
SortedSet<Text> partitionKeys = new TreeSet<>(
- Stream.of("1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f").map(Text::new).collect(toList()));
+ Stream.of("1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f")
+ .map(Text::new).collect(toList()));
client.tableOperations().addSplits("scanpt", partitionKeys);
long t3 = System.currentTimeMillis();
@@ -197,10 +200,12 @@
reportBuilder.info("split", t3 - t2, "Time to split table in ms");
reportBuilder.info("write", 4 * numRows, t4 - t3, "Rate to write data in entries/sec");
reportBuilder.info("compact", 4 * numRows, t5 - t4, "Rate to compact table in entries/sec");
- reportBuilder.info("fullScan", 4 * numRows, t6 - t5, "Rate to do full table scan in entries/sec");
+ reportBuilder.info("fullScan", 4 * numRows, t6 - t5,
+ "Rate to do full table scan in entries/sec");
}
- private static long doLookups(AccumuloClient client, int numThreads, int numScansPerThread) throws Exception {
+ private static long doLookups(AccumuloClient client, int numThreads, int numScansPerThread)
+ throws Exception {
ExecutorService es = Executors.newFixedThreadPool(numThreads);
@@ -220,7 +225,7 @@
es.shutdown();
- return t2 -t1;
+ return t2 - t1;
}
private static void doLookups(AccumuloClient client, int numScans) {
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/ScanExecutorPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/ScanExecutorPT.java
index 0844683..20911cc 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/ScanExecutorPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/ScanExecutorPT.java
@@ -72,12 +72,12 @@
siteCfg.put(Property.TSERV_MINTHREADS.getKey(), "200");
siteCfg.put(Property.TSERV_SCAN_EXECUTORS_PREFIX.getKey() + "se1.threads",
SCAN_EXECUTOR_THREADS);
- siteCfg
- .put(Property.TSERV_SCAN_EXECUTORS_PREFIX.getKey() + "se1.prioritizer", SCAN_PRIORITIZER);
+ siteCfg.put(Property.TSERV_SCAN_EXECUTORS_PREFIX.getKey() + "se1.prioritizer",
+ SCAN_PRIORITIZER);
siteCfg.put(Property.TSERV_SCAN_EXECUTORS_PREFIX.getKey() + "se2.threads",
SCAN_EXECUTOR_THREADS);
- siteCfg
- .put(Property.TSERV_SCAN_EXECUTORS_PREFIX.getKey() + "se2.prioritizer", SCAN_PRIORITIZER);
+ siteCfg.put(Property.TSERV_SCAN_EXECUTORS_PREFIX.getKey() + "se2.prioritizer",
+ SCAN_PRIORITIZER);
return new SystemConfiguration().setAccumuloConfig(siteCfg);
}
@@ -155,8 +155,8 @@
return System.currentTimeMillis() - t1;
}
- private long scan(String tableName, AccumuloClient c, AtomicBoolean stop, Map<String,String> hints)
- throws TableNotFoundException {
+ private long scan(String tableName, AccumuloClient c, AtomicBoolean stop,
+ Map<String,String> hints) throws TableNotFoundException {
long count = 0;
while (!stop.get()) {
try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/ScanFewFamiliesPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/ScanFewFamiliesPT.java
index d2bddf4..4278b2c 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/ScanFewFamiliesPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/ScanFewFamiliesPT.java
@@ -69,10 +69,10 @@
for (int numFams : new int[] {1, 2, 4, 8, 16}) {
LongSummaryStatistics stats = runScans(env, tableName, numFams);
String fams = Strings.padStart(numFams + "", 2, '0');
- builder.info("f" + fams + "_stats", stats, "Times in ms to fetch " + numFams
- + " families from all rows");
- builder.result("f" + fams, stats.getAverage(), "Average time in ms to fetch " + numFams
- + " families from all rows");
+ builder.info("f" + fams + "_stats", stats,
+ "Times in ms to fetch " + numFams + " families from all rows");
+ builder.result("f" + fams, stats.getAverage(),
+ "Average time in ms to fetch " + numFams + " families from all rows");
}
builder.id("sfewfam");
@@ -96,10 +96,11 @@
return stats;
}
- private static long scan(String tableName, AccumuloClient c, Random rand, int numFamilies) throws TableNotFoundException {
+ private static long scan(String tableName, AccumuloClient c, Random rand, int numFamilies)
+ throws TableNotFoundException {
Set<Text> families = new HashSet<>(numFamilies);
- while(families.size() < numFamilies) {
+ while (families.size() < numFamilies) {
families.add(new Text(TestData.fam(rand.nextInt(NUM_FAMS))));
}
diff --git a/src/main/java/org/apache/accumulo/testing/performance/tests/YieldingScanExecutorPT.java b/src/main/java/org/apache/accumulo/testing/performance/tests/YieldingScanExecutorPT.java
index 5d50007..59571fb 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/tests/YieldingScanExecutorPT.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/tests/YieldingScanExecutorPT.java
@@ -135,18 +135,19 @@
builder.parameter("short_threads", NUM_SHORT_SCANS_THREADS, "Threads used to run short scans.");
builder.parameter("long_threads", NUM_LONG_SCANS,
"Threads running long fileter scans. Each thread repeatedly scans entire table for "
- + "duration of test randomly returning a few of the keys.");
+ + "duration of test randomly returning a few of the keys.");
builder.parameter("rows", NUM_ROWS, "Rows in test table");
builder.parameter("familes", NUM_FAMS, "Families per row in test table");
builder.parameter("qualifiers", NUM_QUALS, "Qualifiers per family in test table");
builder.parameter("server_scan_threads", SCAN_EXECUTOR_THREADS,
"Server side scan handler threads that each executor has. There are 2 executors.");
- builder.parameter("filter_probabilities", FILTER_PROBABILITIES, "The chances that one of the long "
- + "filter scans will return any key it sees. The probabilites are cycled through when "
- + "starting long scans.");
- builder.parameter("filter_yield_time", FILTER_YIELD_TIME, "The time in ms after which one of "
- + "the long filter scans will yield.");
+ builder.parameter("filter_probabilities", FILTER_PROBABILITIES,
+ "The chances that one of the long "
+ + "filter scans will return any key it sees. The probabilites are cycled through when "
+ + "starting long scans.");
+ builder.parameter("filter_yield_time", FILTER_YIELD_TIME,
+ "The time in ms after which one of " + "the long filter scans will yield.");
builder.parameter("quick_scan_time", QUICK_SCAN_TIME, "The threshold time in ms for deciding "
+ "what is a quick vs long scan. Times less than this are sent to one executor and longer "
+ "times are sent to another.");
@@ -168,8 +169,8 @@
return System.currentTimeMillis() - t1;
}
- private long scan(String tableName, AccumuloClient c, AtomicBoolean stop, String filterProbability)
- throws TableNotFoundException {
+ private long scan(String tableName, AccumuloClient c, AtomicBoolean stop,
+ String filterProbability) throws TableNotFoundException {
long count = 0;
while (!stop.get()) {
try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY)) {
diff --git a/src/main/java/org/apache/accumulo/testing/performance/util/TestData.java b/src/main/java/org/apache/accumulo/testing/performance/util/TestData.java
index 2351084..66a7f7e 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/util/TestData.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/util/TestData.java
@@ -42,8 +42,8 @@
return FastFormat.toZeroPaddedString(v, 9, 16, EMPTY);
}
- public static void generate(AccumuloClient client, String tableName, int rows, int fams, int quals)
- throws Exception {
+ public static void generate(AccumuloClient client, String tableName, int rows, int fams,
+ int quals) throws Exception {
try (BatchWriter writer = client.createBatchWriter(tableName)) {
int v = 0;
for (int r = 0; r < rows; r++) {
diff --git a/src/main/java/org/apache/accumulo/testing/performance/util/TestExecutor.java b/src/main/java/org/apache/accumulo/testing/performance/util/TestExecutor.java
index b6c4609..42b95d0 100644
--- a/src/main/java/org/apache/accumulo/testing/performance/util/TestExecutor.java
+++ b/src/main/java/org/apache/accumulo/testing/performance/util/TestExecutor.java
@@ -45,11 +45,13 @@
}
public Stream<T> stream() {
- return futures.stream().map(f -> {try {
- return f.get();
- } catch (InterruptedException | ExecutionException e) {
- throw new RuntimeException(e);
- }});
+ return futures.stream().map(f -> {
+ try {
+ return f.get();
+ } catch (InterruptedException | ExecutionException e) {
+ throw new RuntimeException(e);
+ }
+ });
}
@Override
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/Module.java b/src/main/java/org/apache/accumulo/testing/randomwalk/Module.java
index e15b9f5..c832bac 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/Module.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/Module.java
@@ -272,8 +272,8 @@
numHops++;
if (!adjMap.containsKey(curNodeId) && !curNodeId.startsWith("alias.")) {
- throw new Exception("Reached node(" + curNodeId + ") without outgoing edges in module("
- + this + ")");
+ throw new Exception(
+ "Reached node(" + curNodeId + ") without outgoing edges in module(" + this + ")");
}
AdjList adj = adjMap.get(curNodeId);
String nextNodeId = adj.randomNeighbor();
@@ -519,8 +519,8 @@
// set the schema
SchemaFactory sf = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
- Schema moduleSchema = sf.newSchema(this.getClass().getClassLoader()
- .getResource("randomwalk/module.xsd"));
+ Schema moduleSchema = sf
+ .newSchema(this.getClass().getClassLoader().getResource("randomwalk/module.xsd"));
dbf.setSchema(moduleSchema);
// parse the document
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/RandWalkEnv.java b/src/main/java/org/apache/accumulo/testing/randomwalk/RandWalkEnv.java
index 216b995..e3ed805 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/RandWalkEnv.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/RandWalkEnv.java
@@ -48,8 +48,8 @@
* @throws NumberFormatException
* if any configuration property cannot be parsed
*/
- public MultiTableBatchWriter getMultiTableBatchWriter() throws AccumuloException,
- AccumuloSecurityException {
+ public MultiTableBatchWriter getMultiTableBatchWriter()
+ throws AccumuloException, AccumuloSecurityException {
if (mtbw == null) {
mtbw = getAccumuloClient().createMultiTableBatchWriter();
}
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/BulkPlusOne.java b/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/BulkPlusOne.java
index 0ab337e..64db3c6 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/BulkPlusOne.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/BulkPlusOne.java
@@ -24,10 +24,10 @@
import java.util.concurrent.atomic.AtomicLong;
import org.apache.accumulo.core.client.IteratorSetting.Column;
+import org.apache.accumulo.core.client.rfile.RFile;
import org.apache.accumulo.core.client.rfile.RFileWriter;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.client.rfile.RFile;
import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.State;
import org.apache.hadoop.fs.FileStatus;
@@ -96,8 +96,8 @@
}
writer.close();
}
- env.getAccumuloClient().tableOperations()
- .importDirectory(Setup.getTableName(), dir.toString(), fail.toString(), true);
+ env.getAccumuloClient().tableOperations().importDirectory(Setup.getTableName(), dir.toString(),
+ fail.toString(), true);
fs.delete(dir, true);
FileStatus[] failures = fs.listStatus(fail);
if (failures != null && failures.length > 0) {
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Compact.java b/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Compact.java
index 329a397..4db7c1a 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Compact.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Compact.java
@@ -27,8 +27,8 @@
final Text[] points = Merge.getRandomTabletRange(state);
final String rangeString = Merge.rangeToString(points);
log.info("Compacting " + rangeString);
- env.getAccumuloClient().tableOperations()
- .compact(Setup.getTableName(), points[0], points[1], false, true);
+ env.getAccumuloClient().tableOperations().compact(Setup.getTableName(), points[0], points[1],
+ false, true);
log.info("Compaction " + rangeString + " finished");
}
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/ConsistencyCheck.java b/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/ConsistencyCheck.java
index 119a3fe..f011e0a 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/ConsistencyCheck.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/ConsistencyCheck.java
@@ -38,8 +38,8 @@
log.info("Checking " + row);
String user = env.getAccumuloClient().whoami();
Authorizations auths = env.getAccumuloClient().securityOperations().getUserAuthorizations(user);
- try (Scanner scanner = new IsolatedScanner(env.getAccumuloClient().createScanner(
- Setup.getTableName(), auths))) {
+ try (Scanner scanner = new IsolatedScanner(
+ env.getAccumuloClient().createScanner(Setup.getTableName(), auths))) {
scanner.setRange(new Range(row));
scanner.fetchColumnFamily(BulkPlusOne.CHECK_COLUMN_FAMILY);
Value v = null;
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Merge.java b/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Merge.java
index c9835d6..cc898c3 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Merge.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Merge.java
@@ -39,8 +39,8 @@
}
public static Text getRandomRow(Random rand) {
- return new Text(String.format(BulkPlusOne.FMT, (rand.nextLong() & 0x7fffffffffffffffl)
- % BulkPlusOne.LOTS));
+ return new Text(
+ String.format(BulkPlusOne.FMT, (rand.nextLong() & 0x7fffffffffffffffl) % BulkPlusOne.LOTS));
}
public static Text[] getRandomTabletRange(State state) {
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Split.java b/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Split.java
index 1aac330..2588cb1 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Split.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Split.java
@@ -32,8 +32,8 @@
Random rand = (Random) state.get("rand");
int count = rand.nextInt(20);
for (int i = 0; i < count; i++)
- splits.add(new Text(String.format(BulkPlusOne.FMT, (rand.nextLong() & 0x7fffffffffffffffl)
- % BulkPlusOne.LOTS)));
+ splits.add(new Text(String.format(BulkPlusOne.FMT,
+ (rand.nextLong() & 0x7fffffffffffffffl) % BulkPlusOne.LOTS)));
log.info("splitting " + splits);
env.getAccumuloClient().tableOperations().addSplits(Setup.getTableName(), splits);
log.info("split for " + splits + " finished");
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Verify.java b/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Verify.java
index 8c7ecba..c5e5b1a 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Verify.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/bulk/Verify.java
@@ -25,7 +25,6 @@
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
-import com.beust.jcommander.Parameter;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.RowIterator;
import org.apache.accumulo.core.client.Scanner;
@@ -38,6 +37,8 @@
import org.apache.accumulo.testing.randomwalk.Test;
import org.apache.hadoop.io.Text;
+import com.beust.jcommander.Parameter;
+
public class Verify extends Test {
private static byte[] zero = new byte[] {'0'};
@@ -88,8 +89,8 @@
long curr = Long.parseLong(entry.getKey().getColumnQualifier().toString());
if (curr - 1 != prev)
- throw new Exception("Bad marker count " + entry.getKey() + " " + entry.getValue() + " "
- + prev);
+ throw new Exception(
+ "Bad marker count " + entry.getKey() + " " + entry.getValue() + " " + prev);
if (!entry.getValue().toString().equals("1"))
throw new Exception("Bad marker value " + entry.getKey() + " " + entry.getValue());
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/BulkImport.java b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/BulkImport.java
index b618ca2..df45f87 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/BulkImport.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/BulkImport.java
@@ -29,12 +29,12 @@
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.client.TableOfflineException;
+import org.apache.accumulo.core.client.rfile.RFile;
+import org.apache.accumulo.core.client.rfile.RFileWriter;
import org.apache.accumulo.core.data.ColumnUpdate;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.client.rfile.RFile;
-import org.apache.accumulo.core.client.rfile.RFileWriter;
import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.State;
import org.apache.accumulo.testing.randomwalk.Test;
@@ -109,8 +109,8 @@
fs.mkdirs(new Path(bulkDir + "_f"));
try {
- BatchWriter bw = new RFileBatchWriter(env.getHadoopConfiguration(), fs, bulkDir
- + "/file01.rf");
+ BatchWriter bw = new RFileBatchWriter(env.getHadoopConfiguration(), fs,
+ bulkDir + "/file01.rf");
try {
TreeSet<Long> rows = new TreeSet<>();
int numRows = rand.nextInt(100000);
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/ChangeAuthorizations.java b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/ChangeAuthorizations.java
index 0fbf24f..5a0a9fc 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/ChangeAuthorizations.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/ChangeAuthorizations.java
@@ -43,8 +43,8 @@
String userName = userNames.get(rand.nextInt(userNames.size()));
try {
- List<byte[]> auths = new ArrayList<>(client.securityOperations()
- .getUserAuthorizations(userName).getAuthorizations());
+ List<byte[]> auths = new ArrayList<>(
+ client.securityOperations().getUserAuthorizations(userName).getAuthorizations());
if (rand.nextBoolean()) {
String authorization = String.format("a%d", rand.nextInt(5000));
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/CloneTable.java b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/CloneTable.java
index 811ef7b..f97ba01 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/CloneTable.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/CloneTable.java
@@ -59,7 +59,8 @@
} catch (AccumuloException e) {
Throwable cause = e.getCause();
if (cause != null && cause instanceof NamespaceNotFoundException)
- log.debug("Clone: " + srcTableName + " to " + newTableName + " failed, namespace not found");
+ log.debug(
+ "Clone: " + srcTableName + " to " + newTableName + " failed, namespace not found");
else
throw e;
}
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/ConcurrentFixture.java b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/ConcurrentFixture.java
index edaf632..71595b2 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/ConcurrentFixture.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/ConcurrentFixture.java
@@ -20,8 +20,8 @@
import java.util.List;
import java.util.Random;
-import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.Fixture;
+import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.State;
import org.apache.hadoop.io.Text;
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/Config.java b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/Config.java
index 48d8eea..7c35899 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/Config.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/Config.java
@@ -115,8 +115,8 @@
int choice = Integer.parseInt(lastSetting.toString());
Property property = settings[choice].property;
log.debug("Setting " + property.getKey() + " back to " + property.getDefaultValue());
- env.getAccumuloClient().instanceOperations()
- .setProperty(property.getKey(), property.getDefaultValue());
+ env.getAccumuloClient().instanceOperations().setProperty(property.getKey(),
+ property.getDefaultValue());
}
lastSetting = state.getOkIfAbsent(LAST_TABLE_SETTING);
if (lastSetting != null) {
@@ -128,8 +128,8 @@
log.debug("Setting " + property.getKey() + " on " + table + " back to "
+ property.getDefaultValue());
try {
- env.getAccumuloClient().tableOperations()
- .setProperty(table, property.getKey(), property.getDefaultValue());
+ env.getAccumuloClient().tableOperations().setProperty(table, property.getKey(),
+ property.getDefaultValue());
} catch (AccumuloException ex) {
if (ex.getCause() instanceof ThriftTableOperationException) {
ThriftTableOperationException ttoe = (ThriftTableOperationException) ex.getCause();
@@ -150,8 +150,8 @@
log.debug("Setting " + property.getKey() + " on " + namespace + " back to "
+ property.getDefaultValue());
try {
- env.getAccumuloClient().namespaceOperations()
- .setProperty(namespace, property.getKey(), property.getDefaultValue());
+ env.getAccumuloClient().namespaceOperations().setProperty(namespace, property.getKey(),
+ property.getDefaultValue());
} catch (AccumuloException ex) {
if (ex.getCause() instanceof ThriftTableOperationException) {
ThriftTableOperationException ttoe = (ThriftTableOperationException) ex.getCause();
@@ -194,8 +194,8 @@
state.set(LAST_TABLE_SETTING, table + "," + choice);
log.debug("Setting " + setting.property.getKey() + " on table " + table + " to " + newValue);
try {
- env.getAccumuloClient().tableOperations()
- .setProperty(table, setting.property.getKey(), "" + newValue);
+ env.getAccumuloClient().tableOperations().setProperty(table, setting.property.getKey(),
+ "" + newValue);
} catch (AccumuloException ex) {
if (ex.getCause() instanceof ThriftTableOperationException) {
ThriftTableOperationException ttoe = (ThriftTableOperationException) ex.getCause();
@@ -222,11 +222,11 @@
// generate a random value
long newValue = random.nextLong(setting.min, setting.max);
state.set(LAST_NAMESPACE_SETTING, namespace + "," + choice);
- log.debug("Setting " + setting.property.getKey() + " on namespace " + namespace + " to "
- + newValue);
+ log.debug(
+ "Setting " + setting.property.getKey() + " on namespace " + namespace + " to " + newValue);
try {
- env.getAccumuloClient().namespaceOperations()
- .setProperty(namespace, setting.property.getKey(), "" + newValue);
+ env.getAccumuloClient().namespaceOperations().setProperty(namespace,
+ setting.property.getKey(), "" + newValue);
} catch (AccumuloException ex) {
if (ex.getCause() instanceof ThriftTableOperationException) {
ThriftTableOperationException ttoe = (ThriftTableOperationException) ex.getCause();
@@ -246,8 +246,8 @@
long newValue = random.nextLong(setting.min, setting.max);
state.set(LAST_SETTING, "" + choice);
log.debug("Setting " + setting.property.getKey() + " to " + newValue);
- env.getAccumuloClient().instanceOperations()
- .setProperty(setting.property.getKey(), "" + newValue);
+ env.getAccumuloClient().instanceOperations().setProperty(setting.property.getKey(),
+ "" + newValue);
}
}
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/IsolatedScan.java b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/IsolatedScan.java
index 14fb2ee..43112ce 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/IsolatedScan.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/IsolatedScan.java
@@ -51,8 +51,8 @@
String tableName = tableNames.get(rand.nextInt(tableNames.size()));
try {
- RowIterator iter = new RowIterator(new IsolatedScanner(client.createScanner(tableName,
- Authorizations.EMPTY)));
+ RowIterator iter = new RowIterator(
+ new IsolatedScanner(client.createScanner(tableName, Authorizations.EMPTY)));
while (iter.hasNext()) {
PeekingIterator<Entry<Key,Value>> row = Iterators.peekingIterator(iter.next());
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/Replication.java b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/Replication.java
index d77675d..cb60e11 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/Replication.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/concurrent/Replication.java
@@ -178,8 +178,8 @@
// junit isn't a dependency
private void assertEquals(int expected, int actual) {
if (expected != actual)
- throw new RuntimeException(String.format("%d fails to match expected value %d", actual,
- expected));
+ throw new RuntimeException(
+ String.format("%d fails to match expected value %d", actual, expected));
}
// junit isn't a dependency
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/conditional/Setup.java b/src/main/java/org/apache/accumulo/testing/randomwalk/conditional/Setup.java
index 631d31f..b4fbb99 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/conditional/Setup.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/conditional/Setup.java
@@ -49,8 +49,8 @@
env.getAccumuloClient().tableOperations().create(tableName);
log.debug("created table " + tableName);
boolean blockCache = rand.nextBoolean();
- env.getAccumuloClient().tableOperations()
- .setProperty(tableName, Property.TABLE_BLOCKCACHE_ENABLED.getKey(), blockCache + "");
+ env.getAccumuloClient().tableOperations().setProperty(tableName,
+ Property.TABLE_BLOCKCACHE_ENABLED.getKey(), blockCache + "");
log.debug("set " + Property.TABLE_BLOCKCACHE_ENABLED.getKey() + " " + blockCache);
} catch (TableExistsException tee) {}
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/conditional/Transfer.java b/src/main/java/org/apache/accumulo/testing/randomwalk/conditional/Transfer.java
index d16d283..a8fa018 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/conditional/Transfer.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/conditional/Transfer.java
@@ -111,8 +111,8 @@
int amt = rand.nextInt(50);
- log.debug("transfer req " + bank + " " + amt + " " + acct1 + " " + a1 + " " + acct2 + " "
- + a2);
+ log.debug(
+ "transfer req " + bank + " " + amt + " " + acct1 + " " + a1 + " " + acct2 + " " + a2);
if (a1.bal >= amt) {
ConditionalMutation cm = new ConditionalMutation(bank,
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/image/ImageFixture.java b/src/main/java/org/apache/accumulo/testing/randomwalk/image/ImageFixture.java
index 6e6a9b7..02c7add 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/image/ImageFixture.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/image/ImageFixture.java
@@ -29,8 +29,8 @@
import org.apache.accumulo.core.client.MultiTableBatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.Fixture;
+import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.State;
import org.apache.hadoop.io.Text;
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/image/ScanMeta.java b/src/main/java/org/apache/accumulo/testing/randomwalk/image/ScanMeta.java
index 1355381..f9a47f3 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/image/ScanMeta.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/image/ScanMeta.java
@@ -102,8 +102,8 @@
if (!hashes.equals(hashes2)) {
log.error("uuids from doc table : " + hashes.values());
log.error("uuids from index : " + hashes2.values());
- throw new Exception("Mismatch between document table and index " + indexTableName + " "
- + imageTableName);
+ throw new Exception(
+ "Mismatch between document table and index " + indexTableName + " " + imageTableName);
}
indexScanner.close();
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/image/Write.java b/src/main/java/org/apache/accumulo/testing/randomwalk/image/Write.java
index ae3be22..4cf1390 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/image/Write.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/image/Write.java
@@ -78,8 +78,8 @@
state.set("totalWrites", totalWrites);
// set count
- m.put(META_COLUMN_FAMILY, COUNT_COLUMN_QUALIFIER, new Value(String.format("%d", totalWrites)
- .getBytes(UTF_8)));
+ m.put(META_COLUMN_FAMILY, COUNT_COLUMN_QUALIFIER,
+ new Value(String.format("%d", totalWrites).getBytes(UTF_8)));
// add mutation
imagesBW.addMutation(m);
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/multitable/CopyTool.java b/src/main/java/org/apache/accumulo/testing/randomwalk/multitable/CopyTool.java
index 2ac69fb..54866f2 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/multitable/CopyTool.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/multitable/CopyTool.java
@@ -20,12 +20,12 @@
import java.util.Properties;
import org.apache.accumulo.core.client.Accumulo;
-import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
-import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
+import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/multitable/MultiTableFixture.java b/src/main/java/org/apache/accumulo/testing/randomwalk/multitable/MultiTableFixture.java
index 59ee619..1c65118 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/multitable/MultiTableFixture.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/multitable/MultiTableFixture.java
@@ -24,8 +24,8 @@
import org.apache.accumulo.core.client.MultiTableBatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.Fixture;
+import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.State;
public class MultiTableFixture extends Fixture {
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/security/AlterTable.java b/src/main/java/org/apache/accumulo/testing/randomwalk/security/AlterTable.java
index d01a6fc..c24b961 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/security/AlterTable.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/security/AlterTable.java
@@ -36,8 +36,8 @@
@Override
public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
String systemUser = WalkingSecurity.get(state, env).getSysUserName();
- try (AccumuloClient client = env.createClient(systemUser, WalkingSecurity.get(state, env)
- .getSysToken())) {
+ try (AccumuloClient client = env.createClient(systemUser,
+ WalkingSecurity.get(state, env).getSysToken())) {
String tableName = WalkingSecurity.get(state, env).getTableName();
@@ -59,8 +59,9 @@
throw new AccumuloException("Got unexpected ae error code", ae);
}
}
- String newTableName = String.format("security_%s_%s_%d", InetAddress.getLocalHost()
- .getHostName().replaceAll("[-.]", "_"), env.getPid(), System.currentTimeMillis());
+ String newTableName = String.format("security_%s_%s_%d",
+ InetAddress.getLocalHost().getHostName().replaceAll("[-.]", "_"), env.getPid(),
+ System.currentTimeMillis());
renameTable(client, state, env, tableName, newTableName, hasPermission, exists);
}
@@ -74,8 +75,8 @@
} catch (AccumuloSecurityException ae) {
if (ae.getSecurityErrorCode().equals(SecurityErrorCode.PERMISSION_DENIED)) {
if (hasPermission)
- throw new AccumuloException(
- "Got a security exception when I should have had permission.", ae);
+ throw new AccumuloException("Got a security exception when I should have had permission.",
+ ae);
else
return;
} else if (ae.getSecurityErrorCode().equals(SecurityErrorCode.BAD_CREDENTIALS)) {
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/security/AlterTablePerm.java b/src/main/java/org/apache/accumulo/testing/randomwalk/security/AlterTablePerm.java
index e70d486..27b73a6 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/security/AlterTablePerm.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/security/AlterTablePerm.java
@@ -62,8 +62,8 @@
} else
tabPerm = TablePermission.valueOf(perm);
String tableName = WalkingSecurity.get(state, env).getTableName();
- boolean hasPerm = WalkingSecurity.get(state, env)
- .hasTablePermission(target, tableName, tabPerm);
+ boolean hasPerm = WalkingSecurity.get(state, env).hasTablePermission(target, tableName,
+ tabPerm);
boolean canGive;
String sourceUser;
AuthenticationToken sourceToken;
@@ -195,8 +195,8 @@
if (!tableExists)
throw new AccumuloException("Table shouldn't have existed, but apparently does");
if (!canGive)
- throw new AccumuloException(client.whoami()
- + " shouldn't have been able to grant privilege");
+ throw new AccumuloException(
+ client.whoami() + " shouldn't have been able to grant privilege");
}
}
}
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/security/Authenticate.java b/src/main/java/org/apache/accumulo/testing/randomwalk/security/Authenticate.java
index 89f3290..db9c076 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/security/Authenticate.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/security/Authenticate.java
@@ -33,8 +33,8 @@
@Override
public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
- authenticate(WalkingSecurity.get(state, env).getSysUserName(), WalkingSecurity.get(state, env)
- .getSysToken(), state, env, props);
+ authenticate(WalkingSecurity.get(state, env).getSysUserName(),
+ WalkingSecurity.get(state, env).getSysToken(), state, env, props);
}
public static void authenticate(String principal, AuthenticationToken token, State state,
@@ -56,8 +56,7 @@
byte[] password = Arrays.copyOf(WalkingSecurity.get(state, env).getUserPassword(target),
WalkingSecurity.get(state, env).getUserPassword(target).length);
boolean hasPermission = client.securityOperations().hasSystemPermission(principal,
- SystemPermission.SYSTEM)
- || principal.equals(target);
+ SystemPermission.SYSTEM) || principal.equals(target);
if (!success)
for (int i = 0; i < password.length; i++)
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/security/ChangePass.java b/src/main/java/org/apache/accumulo/testing/randomwalk/security/ChangePass.java
index b75f58a..b2d0221 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/security/ChangePass.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/security/ChangePass.java
@@ -57,8 +57,7 @@
targetExists = WalkingSecurity.get(state, env).userExists(target);
hasPerm = client.securityOperations().hasSystemPermission(principal,
- SystemPermission.ALTER_USER)
- || principal.equals(target);
+ SystemPermission.ALTER_USER) || principal.equals(target);
Random r = new Random();
@@ -73,8 +72,9 @@
switch (ae.getSecurityErrorCode()) {
case PERMISSION_DENIED:
if (hasPerm)
- throw new AccumuloException("Change failed when it should have succeeded to change "
- + target + "'s password", ae);
+ throw new AccumuloException(
+ "Change failed when it should have succeeded to change " + target + "'s password",
+ ae);
return;
case USER_DOESNT_EXIST:
if (targetExists)
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/security/CreateUser.java b/src/main/java/org/apache/accumulo/testing/randomwalk/security/CreateUser.java
index b6b1ca2..a4b242b 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/security/CreateUser.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/security/CreateUser.java
@@ -32,8 +32,8 @@
@Override
public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
String sysPrincipal = WalkingSecurity.get(state, env).getSysUserName();
- try (AccumuloClient client = env.createClient(sysPrincipal, WalkingSecurity.get(state, env)
- .getSysToken())) {
+ try (AccumuloClient client = env.createClient(sysPrincipal,
+ WalkingSecurity.get(state, env).getSysToken())) {
String tableUserName = WalkingSecurity.get(state, env).getTabUserName();
@@ -52,8 +52,8 @@
else {
// create user anyway for sake of state
if (!exists) {
- env.getAccumuloClient().securityOperations()
- .createLocalUser(tableUserName, tabUserPass);
+ env.getAccumuloClient().securityOperations().createLocalUser(tableUserName,
+ tabUserPass);
WalkingSecurity.get(state, env).createUser(tableUserName, tabUserPass);
Thread.sleep(1000);
}
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/security/DropUser.java b/src/main/java/org/apache/accumulo/testing/randomwalk/security/DropUser.java
index 9718dde..d5e96b0 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/security/DropUser.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/security/DropUser.java
@@ -31,8 +31,8 @@
@Override
public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
String sysPrincipal = WalkingSecurity.get(state, env).getSysUserName();
- try (AccumuloClient client = env.createClient(sysPrincipal, WalkingSecurity.get(state, env)
- .getSysToken())) {
+ try (AccumuloClient client = env.createClient(sysPrincipal,
+ WalkingSecurity.get(state, env).getSysToken())) {
String tableUserName = WalkingSecurity.get(state, env).getTabUserName();
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/security/SecurityFixture.java b/src/main/java/org/apache/accumulo/testing/randomwalk/security/SecurityFixture.java
index 9d34282..ecbd807 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/security/SecurityFixture.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/security/SecurityFixture.java
@@ -25,8 +25,8 @@
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.SystemPermission;
import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.Fixture;
+import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.State;
public class SecurityFixture extends Fixture {
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/security/TableOp.java b/src/main/java/org/apache/accumulo/testing/randomwalk/security/TableOp.java
index 8e671c4..7e217c3 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/security/TableOp.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/security/TableOp.java
@@ -57,8 +57,8 @@
@Override
public void visit(State state, RandWalkEnv env, Properties props) throws Exception {
String tablePrincipal = WalkingSecurity.get(state, env).getTabUserName();
- try (AccumuloClient client = env.createClient(tablePrincipal, WalkingSecurity.get(state, env)
- .getTabToken())) {
+ try (AccumuloClient client = env.createClient(tablePrincipal,
+ WalkingSecurity.get(state, env).getTabToken())) {
TableOperations tableOps = client.tableOperations();
SecurityOperations secOps = client.securityOperations();
@@ -88,8 +88,8 @@
Authorizations auths = secOps.getUserAuthorizations(tablePrincipal);
boolean ambiguousZone = WalkingSecurity.get(state, env).inAmbiguousZone(client.whoami(),
tp);
- boolean ambiguousAuths = WalkingSecurity.get(state, env).ambiguousAuthorizations(
- client.whoami());
+ boolean ambiguousAuths = WalkingSecurity.get(state, env)
+ .ambiguousAuthorizations(client.whoami());
Scanner scan = null;
try {
@@ -101,8 +101,8 @@
Key k = entry.getKey();
seen++;
if (!auths.contains(k.getColumnVisibilityData()) && !ambiguousAuths)
- throw new AccumuloException("Got data I should not be capable of seeing: " + k
- + " table " + tableName);
+ throw new AccumuloException(
+ "Got data I should not be capable of seeing: " + k + " table " + tableName);
}
if (!canRead && !ambiguousZone)
throw new AccumuloException(
@@ -117,8 +117,8 @@
throw new AccumuloException("Got mismatched amounts of data");
} catch (TableNotFoundException tnfe) {
if (tableExists)
- throw new AccumuloException(
- "Accumulo and test suite out of sync: table " + tableName, tnfe);
+ throw new AccumuloException("Accumulo and test suite out of sync: table " + tableName,
+ tnfe);
return;
} catch (AccumuloSecurityException ae) {
if (ae.getSecurityErrorCode().equals(SecurityErrorCode.PERMISSION_DENIED)) {
@@ -137,8 +137,8 @@
throw new AccumuloException("Unexpected exception!", ae);
} catch (RuntimeException re) {
if (re.getCause() instanceof AccumuloSecurityException
- && ((AccumuloSecurityException) re.getCause()).getSecurityErrorCode().equals(
- SecurityErrorCode.PERMISSION_DENIED)) {
+ && ((AccumuloSecurityException) re.getCause()).getSecurityErrorCode()
+ .equals(SecurityErrorCode.PERMISSION_DENIED)) {
if (canRead && !ambiguousZone)
throw new AccumuloException(
"Table read permission out of sync with Accumulo: table " + tableName,
@@ -147,8 +147,8 @@
return;
}
if (re.getCause() instanceof AccumuloSecurityException
- && ((AccumuloSecurityException) re.getCause()).getSecurityErrorCode().equals(
- SecurityErrorCode.BAD_AUTHORIZATIONS)) {
+ && ((AccumuloSecurityException) re.getCause()).getSecurityErrorCode()
+ .equals(SecurityErrorCode.BAD_AUTHORIZATIONS)) {
if (ambiguousAuths)
return;
else
@@ -248,8 +248,8 @@
} catch (AccumuloSecurityException ae) {
if (ae.getSecurityErrorCode().equals(SecurityErrorCode.PERMISSION_DENIED)) {
if (secOps.hasTablePermission(tablePrincipal, tableName, TablePermission.BULK_IMPORT))
- throw new AccumuloException("Bulk Import failed when it should have worked: "
- + tableName);
+ throw new AccumuloException(
+ "Bulk Import failed when it should have worked: " + tableName);
return;
} else if (ae.getSecurityErrorCode().equals(SecurityErrorCode.BAD_CREDENTIALS)) {
if (WalkingSecurity.get(state, env).userPassTransient(client.whoami()))
@@ -263,8 +263,8 @@
fs.delete(fail, true);
if (!secOps.hasTablePermission(tablePrincipal, tableName, TablePermission.BULK_IMPORT))
- throw new AccumuloException("Bulk Import succeeded when it should have failed: " + dir
- + " table " + tableName);
+ throw new AccumuloException(
+ "Bulk Import succeeded when it should have failed: " + dir + " table " + tableName);
break;
case ALTER_TABLE:
boolean tablePerm;
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/security/Validate.java b/src/main/java/org/apache/accumulo/testing/randomwalk/security/Validate.java
index 466c163..2874f3a 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/security/Validate.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/security/Validate.java
@@ -45,8 +45,8 @@
if (tableExists != cloudTableExists)
throw new AccumuloException("Table existance out of sync");
- boolean tableUserExists = WalkingSecurity.get(state, env).userExists(
- WalkingSecurity.get(state, env).getTabUserName());
+ boolean tableUserExists = WalkingSecurity.get(state, env)
+ .userExists(WalkingSecurity.get(state, env).getTabUserName());
boolean cloudTableUserExists = client.securityOperations().listLocalUsers()
.contains(WalkingSecurity.get(state, env).getTabUserName());
if (tableUserExists != cloudTableUserExists)
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/security/WalkingSecurity.java b/src/main/java/org/apache/accumulo/testing/randomwalk/security/WalkingSecurity.java
index 695fdd1..9d81b00 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/security/WalkingSecurity.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/security/WalkingSecurity.java
@@ -162,8 +162,8 @@
setTabPerm(state, user, permission, table, false);
}
- public void cleanTablePermissions(String table) throws AccumuloSecurityException,
- TableNotFoundException {
+ public void cleanTablePermissions(String table)
+ throws AccumuloSecurityException, TableNotFoundException {
for (String user : new String[] {getSysUserName(), getTabUserName()}) {
for (TablePermission tp : TablePermission.values()) {
revokeTablePermission(user, table, tp);
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/sequential/BatchVerify.java b/src/main/java/org/apache/accumulo/testing/randomwalk/sequential/BatchVerify.java
index 3788a4f..badef95 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/sequential/BatchVerify.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/sequential/BatchVerify.java
@@ -64,8 +64,8 @@
rangeEnd = numWrites - 1;
}
count += rangeEnd - rangeStart + 1;
- ranges.add(new Range(new Text(String.format("%010d", rangeStart)), new Text(String.format(
- "%010d", rangeEnd))));
+ ranges.add(new Range(new Text(String.format("%010d", rangeStart)),
+ new Text(String.format("%010d", rangeEnd))));
}
ranges = Range.mergeOverlapping(ranges);
@@ -76,7 +76,8 @@
if (count == 0 || ranges.size() == 0)
return;
- log.debug(String.format("scanning %d rows in the following %d ranges:", count, ranges.size()));
+ log.debug(
+ String.format("scanning %d rows in the following %d ranges:", count, ranges.size()));
for (Range r : ranges) {
log.debug(r.toString());
}
@@ -97,8 +98,8 @@
boolean done = false;
for (Range r : ranges) {
int start = Integer.parseInt(r.getStartKey().getRow().toString());
- int end = Integer.parseInt(String.copyValueOf(r.getEndKey().getRow().toString()
- .toCharArray(), 0, 10));
+ int end = Integer
+ .parseInt(String.copyValueOf(r.getEndKey().getRow().toString().toCharArray(), 0, 10));
for (int i = start; i <= end; i++) {
if (done) {
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/sequential/MapRedVerifyTool.java b/src/main/java/org/apache/accumulo/testing/randomwalk/sequential/MapRedVerifyTool.java
index 465e63f..f1e560c 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/sequential/MapRedVerifyTool.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/sequential/MapRedVerifyTool.java
@@ -21,11 +21,11 @@
import java.util.Properties;
import org.apache.accumulo.core.client.Accumulo;
-import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
-import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.hadoop.mapreduce.AccumuloInputFormat;
+import org.apache.accumulo.hadoop.mapreduce.AccumuloOutputFormat;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
@@ -71,8 +71,8 @@
writeMutation(output, start, index);
}
- private void writeMutation(Context output, int start, int end) throws IOException,
- InterruptedException {
+ private void writeMutation(Context output, int start, int end)
+ throws IOException, InterruptedException {
Mutation m = new Mutation(new Text(String.format("%010d", start)));
m.put(new Text(String.format("%010d", end)), new Text(""), new Value(new byte[0]));
output.write(null, m);
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/sequential/SequentialFixture.java b/src/main/java/org/apache/accumulo/testing/randomwalk/sequential/SequentialFixture.java
index f949cce..937458a 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/sequential/SequentialFixture.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/sequential/SequentialFixture.java
@@ -22,8 +22,8 @@
import org.apache.accumulo.core.client.MultiTableBatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.Fixture;
+import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.State;
public class SequentialFixture extends Fixture {
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/BulkInsert.java b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/BulkInsert.java
index ee59a4c..cbc6589 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/BulkInsert.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/BulkInsert.java
@@ -131,10 +131,10 @@
dataWriter.close();
indexWriter.close();
- sort(state, env, fs, dataTableName, rootDir + "/data.seq", rootDir + "/data_bulk", rootDir
- + "/data_work", maxSplits);
- sort(state, env, fs, indexTableName, rootDir + "/index.seq", rootDir + "/index_bulk", rootDir
- + "/index_work", maxSplits);
+ sort(state, env, fs, dataTableName, rootDir + "/data.seq", rootDir + "/data_bulk",
+ rootDir + "/data_work", maxSplits);
+ sort(state, env, fs, indexTableName, rootDir + "/index.seq", rootDir + "/index_bulk",
+ rootDir + "/index_work", maxSplits);
bulkImport(fs, state, env, dataTableName, rootDir, "data");
bulkImport(fs, state, env, indexTableName, rootDir, "index");
@@ -171,8 +171,9 @@
private void sort(State state, RandWalkEnv env, FileSystem fs, String tableName, String seqFile,
String outputDir, String workDir, int maxSplits) throws Exception {
- PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(new Path(workDir
- + "/splits.txt"))), false, UTF_8.name());
+ PrintStream out = new PrintStream(
+ new BufferedOutputStream(fs.create(new Path(workDir + "/splits.txt"))), false,
+ UTF_8.name());
AccumuloClient client = env.getAccumuloClient();
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/CloneIndex.java b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/CloneIndex.java
index 4d232b3..334beae 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/CloneIndex.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/CloneIndex.java
@@ -35,10 +35,8 @@
long t1 = System.currentTimeMillis();
env.getAccumuloClient().tableOperations().flush(indexTableName, null, null, true);
long t2 = System.currentTimeMillis();
- env.getAccumuloClient()
- .tableOperations()
- .clone(indexTableName, tmpIndexTableName, false, new HashMap<String,String>(),
- new HashSet<String>());
+ env.getAccumuloClient().tableOperations().clone(indexTableName, tmpIndexTableName, false,
+ new HashMap<String,String>(), new HashSet<String>());
long t3 = System.currentTimeMillis();
log.debug("Cloned " + tmpIndexTableName + " from " + indexTableName + " flush: " + (t2 - t1)
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/CompactFilter.java b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/CompactFilter.java
index 1406acd..3c09149 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/CompactFilter.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/CompactFilter.java
@@ -57,8 +57,8 @@
documentFilters.add(is);
long t1 = System.currentTimeMillis();
- env.getAccumuloClient().tableOperations()
- .compact(docTableName, null, null, documentFilters, true, true);
+ env.getAccumuloClient().tableOperations().compact(docTableName, null, null, documentFilters,
+ true, true);
long t2 = System.currentTimeMillis();
long t3 = t2 - t1;
@@ -70,12 +70,12 @@
indexFilters.add(is);
t1 = System.currentTimeMillis();
- env.getAccumuloClient().tableOperations()
- .compact(indexTableName, null, null, indexFilters, true, true);
+ env.getAccumuloClient().tableOperations().compact(indexTableName, null, null, indexFilters,
+ true, true);
t2 = System.currentTimeMillis();
- log.debug("Filtered documents using compaction iterators " + regex + " " + (t3) + " "
- + (t2 - t1));
+ log.debug(
+ "Filtered documents using compaction iterators " + regex + " " + (t3) + " " + (t2 - t1));
BatchScanner bscanner = env.getAccumuloClient().createBatchScanner(docTableName,
new Authorizations(), 10);
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/ExportIndex.java b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/ExportIndex.java
index f55b363..7c81e3c 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/ExportIndex.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/ExportIndex.java
@@ -55,8 +55,8 @@
// disable spits, so that splits can be compared later w/o worrying one
// table splitting and the other not
- env.getAccumuloClient().tableOperations()
- .setProperty(indexTableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "20G");
+ env.getAccumuloClient().tableOperations().setProperty(indexTableName,
+ Property.TABLE_SPLIT_THRESHOLD.getKey(), "20G");
long t1 = System.currentTimeMillis();
@@ -70,8 +70,8 @@
long t3 = System.currentTimeMillis();
// copy files
- BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(new Path(exportDir,
- "distcp.txt")), UTF_8));
+ BufferedReader reader = new BufferedReader(
+ new InputStreamReader(fs.open(new Path(exportDir, "distcp.txt")), UTF_8));
String file = null;
while ((file = reader.readLine()) != null) {
Path src = new Path(file);
@@ -91,10 +91,10 @@
fs.delete(new Path(exportDir), true);
fs.delete(new Path(copyDir), true);
- HashSet<Text> splits1 = new HashSet<>(env.getAccumuloClient().tableOperations()
- .listSplits(indexTableName));
- HashSet<Text> splits2 = new HashSet<>(env.getAccumuloClient().tableOperations()
- .listSplits(tmpIndexTableName));
+ HashSet<Text> splits1 = new HashSet<>(
+ env.getAccumuloClient().tableOperations().listSplits(indexTableName));
+ HashSet<Text> splits2 = new HashSet<>(
+ env.getAccumuloClient().tableOperations().listSplits(tmpIndexTableName));
if (!splits1.equals(splits2))
throw new Exception("Splits not equals " + indexTableName + " " + tmpIndexTableName);
@@ -113,10 +113,10 @@
throw new Exception("Props not equals " + indexTableName + " " + tmpIndexTableName);
// unset the split threshold
- env.getAccumuloClient().tableOperations()
- .removeProperty(indexTableName, Property.TABLE_SPLIT_THRESHOLD.getKey());
- env.getAccumuloClient().tableOperations()
- .removeProperty(tmpIndexTableName, Property.TABLE_SPLIT_THRESHOLD.getKey());
+ env.getAccumuloClient().tableOperations().removeProperty(indexTableName,
+ Property.TABLE_SPLIT_THRESHOLD.getKey());
+ env.getAccumuloClient().tableOperations().removeProperty(tmpIndexTableName,
+ Property.TABLE_SPLIT_THRESHOLD.getKey());
log.debug("Imported " + tmpIndexTableName + " from " + indexTableName + " flush: " + (t2 - t1)
+ "ms export: " + (t3 - t2) + "ms copy:" + (t4 - t3) + "ms import:" + (t5 - t4) + "ms");
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/Grep.java b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/Grep.java
index eb1dc08..3f289d0 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/Grep.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/Grep.java
@@ -72,8 +72,8 @@
for (int i = 0; i < words.length; i++) {
IteratorSetting more = new IteratorSetting(20 + i, "ii" + i, RegExFilter.class);
- RegExFilter
- .setRegexs(more, null, null, null, "(^|(.*\\s))" + words[i] + "($|(\\s.*))", false);
+ RegExFilter.setRegexs(more, null, null, null, "(^|(.*\\s))" + words[i] + "($|(\\s.*))",
+ false);
bs.addScanIterator(more);
}
@@ -88,9 +88,9 @@
bs.close();
if (!documentsFoundInIndex.equals(documentsFoundByGrep)) {
- throw new Exception("Set of documents found not equal for words "
- + Arrays.asList(words).toString() + " " + documentsFoundInIndex + " "
- + documentsFoundByGrep);
+ throw new Exception(
+ "Set of documents found not equal for words " + Arrays.asList(words).toString() + " "
+ + documentsFoundInIndex + " " + documentsFoundByGrep);
}
log.debug("Grep and index agree " + Arrays.asList(words).toString() + " "
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/Merge.java b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/Merge.java
index eaa9253..0420eb0 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/Merge.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/Merge.java
@@ -45,8 +45,8 @@
// throw an excpetion so that test will die an no further changes to
// table will occur...
// this way table is left as is for debugging.
- throw new Exception("There are more tablets after a merge: " + splits.size() + " was "
- + splitSet.size());
+ throw new Exception(
+ "There are more tablets after a merge: " + splits.size() + " was " + splitSet.size());
}
}
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/ShardFixture.java b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/ShardFixture.java
index e451ef1..cdcd0e6 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/ShardFixture.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/ShardFixture.java
@@ -25,8 +25,8 @@
import org.apache.accumulo.core.client.MultiTableBatchWriter;
import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.Fixture;
+import org.apache.accumulo.testing.randomwalk.RandWalkEnv;
import org.apache.accumulo.testing.randomwalk.State;
import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
@@ -66,10 +66,10 @@
log.info("Added " + splits.size() + " splits to " + name);
if (enableCache) {
- client.tableOperations()
- .setProperty(name, Property.TABLE_INDEXCACHE_ENABLED.getKey(), "true");
- client.tableOperations()
- .setProperty(name, Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "true");
+ client.tableOperations().setProperty(name, Property.TABLE_INDEXCACHE_ENABLED.getKey(),
+ "true");
+ client.tableOperations().setProperty(name, Property.TABLE_BLOCKCACHE_ENABLED.getKey(),
+ "true");
log.info("Enabled caching for table " + name);
}
diff --git a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/VerifyIndex.java b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/VerifyIndex.java
index 9b2741b..554d541 100644
--- a/src/main/java/org/apache/accumulo/testing/randomwalk/shard/VerifyIndex.java
+++ b/src/main/java/org/apache/accumulo/testing/randomwalk/shard/VerifyIndex.java
@@ -63,8 +63,8 @@
}
if (iter.hasNext())
- throw new Exception("index rebuild mismatch " + iter.next().getKey() + " "
- + tmpIndexTableName);
+ throw new Exception(
+ "index rebuild mismatch " + iter.next().getKey() + " " + tmpIndexTableName);
log.debug("Verified " + count + " index entries ");
diff --git a/src/main/java/org/apache/accumulo/testing/scalability/Run.java b/src/main/java/org/apache/accumulo/testing/scalability/Run.java
index 1c93390..f8d8479 100644
--- a/src/main/java/org/apache/accumulo/testing/scalability/Run.java
+++ b/src/main/java/org/apache/accumulo/testing/scalability/Run.java
@@ -78,8 +78,9 @@
log.error("Error loading config file.", e);
}
- ScaleTest test = (ScaleTest) Class.forName(
- String.format("org.apache.accumulo.test.scalability.%s", opts.testId)).newInstance();
+ ScaleTest test = (ScaleTest) Class
+ .forName(String.format("org.apache.accumulo.test.scalability.%s", opts.testId))
+ .newInstance();
test.init(scaleProps, testProps, opts.numTabletServers);
diff --git a/src/main/java/org/apache/accumulo/testing/scalability/ScaleTest.java b/src/main/java/org/apache/accumulo/testing/scalability/ScaleTest.java
index f36ba40..fe57264 100644
--- a/src/main/java/org/apache/accumulo/testing/scalability/ScaleTest.java
+++ b/src/main/java/org/apache/accumulo/testing/scalability/ScaleTest.java
@@ -48,8 +48,8 @@
String password = this.scaleProps.getProperty("PASSWORD");
System.out.println(password);
- client = Accumulo.newClient().to(instanceName, zookeepers)
- .as(user, new PasswordToken(password)).build();
+ client = Accumulo.newClient().to(instanceName, zookeepers).as(user, new PasswordToken(password))
+ .build();
}
protected void startTimer() {
diff --git a/src/main/java/org/apache/accumulo/testing/stress/ScanOpts.java b/src/main/java/org/apache/accumulo/testing/stress/ScanOpts.java
index b403e98..05e832d 100644
--- a/src/main/java/org/apache/accumulo/testing/stress/ScanOpts.java
+++ b/src/main/java/org/apache/accumulo/testing/stress/ScanOpts.java
@@ -16,9 +16,10 @@
*/
package org.apache.accumulo.testing.stress;
-import com.beust.jcommander.Parameter;
import org.apache.accumulo.testing.cli.ClientOpts;
+import com.beust.jcommander.Parameter;
+
class ScanOpts extends ClientOpts {
@Parameter(names = {"-t", "--table"}, description = "table to use")
String tableName = WriteOptions.DEFAULT_TABLE;
diff --git a/src/main/java/org/apache/accumulo/testing/stress/Write.java b/src/main/java/org/apache/accumulo/testing/stress/Write.java
index ec07156..2b92b59 100644
--- a/src/main/java/org/apache/accumulo/testing/stress/Write.java
+++ b/src/main/java/org/apache/accumulo/testing/stress/Write.java
@@ -52,15 +52,15 @@
}
DataWriter dw = new DataWriter(c.createBatchWriter(opts.tableName), new RandomMutations(
- // rows
+ // rows
new RandomByteArrays(new RandomWithinRange(opts.row_seed, opts.rowMin(), opts.rowMax())),
// cfs
new RandomByteArrays(new RandomWithinRange(opts.cf_seed, opts.cfMin(), opts.cfMax())),
// cqs
new RandomByteArrays(new RandomWithinRange(opts.cq_seed, opts.cqMin(), opts.cqMax())),
// vals
- new RandomByteArrays(new RandomWithinRange(opts.value_seed, opts.valueMin(),
- opts.valueMax())),
+ new RandomByteArrays(
+ new RandomWithinRange(opts.value_seed, opts.valueMin(), opts.valueMax())),
// number of cells per row
new RandomWithinRange(opts.row_width_seed, opts.rowWidthMin(), opts.rowWidthMax()),
// max cells per mutation
diff --git a/src/main/java/org/apache/accumulo/testing/stress/WriteOptions.java b/src/main/java/org/apache/accumulo/testing/stress/WriteOptions.java
index 33ec845..a8f8321 100644
--- a/src/main/java/org/apache/accumulo/testing/stress/WriteOptions.java
+++ b/src/main/java/org/apache/accumulo/testing/stress/WriteOptions.java
@@ -16,9 +16,10 @@
*/
package org.apache.accumulo.testing.stress;
-import com.beust.jcommander.Parameter;
import org.apache.accumulo.testing.cli.ClientOpts;
+import com.beust.jcommander.Parameter;
+
class WriteOptions extends ClientOpts {
static final String DEFAULT_TABLE = "stress_test";
static final int DEFAULT_MIN = 1, DEFAULT_MAX = 128, DEFAULT_SPREAD = DEFAULT_MAX - DEFAULT_MIN;
@@ -123,17 +124,15 @@
if (min_ref == null && max_ref != null) {
// we don't support just specifying a max yet
- throw new IllegalArgumentException(
- String
- .format(
- "[%s] Maximum value supplied, but no minimum. Must supply a minimum with a maximum value.",
- label));
+ throw new IllegalArgumentException(String.format(
+ "[%s] Maximum value supplied, but no minimum. Must supply a minimum with a maximum value.",
+ label));
} else if (min_ref != null && max_ref != null) {
// if a user supplied lower and upper bounds, we need to verify
// that min <= max
if (min_ref.compareTo(max_ref) > 0) {
- throw new IllegalArgumentException(String.format(
- "[%s] Min value (%d) is greater than max value (%d)", label, min_ref, max_ref));
+ throw new IllegalArgumentException(String
+ .format("[%s] Min value (%d) is greater than max value (%d)", label, min_ref, max_ref));
}
}
}
diff --git a/src/main/java/org/apache/accumulo/testing/stress/package-info.java b/src/main/java/org/apache/accumulo/testing/stress/package-info.java
index 658938a..765a7fc 100644
--- a/src/main/java/org/apache/accumulo/testing/stress/package-info.java
+++ b/src/main/java/org/apache/accumulo/testing/stress/package-info.java
@@ -15,22 +15,26 @@
* limitations under the License.
*/
/**
- * This package contains utility classes designed to test Accumulo when large cells are being written. This is an attempt to observe the behavior Accumulo
- * displays when compacting and reading these cells.
+ * This package contains utility classes designed to test Accumulo when large cells are being
+ * written. This is an attempt to observe the behavior Accumulo displays when compacting and reading
+ * these cells.
*
- * There are two components to this package: {@link org.apache.accumulo.testing.stress.Write} and {@link org.apache.accumulo.testing.stress.Scan}.
+ * There are two components to this package: {@link org.apache.accumulo.testing.stress.Write} and
+ * {@link org.apache.accumulo.testing.stress.Scan}.
*
- * The {@link org.apache.accumulo.testing.stress.Write} provides facilities for writing random sized cells. Users can configure minimum and maximum
- * sized portions of a cell. The portions users can configure are the row, column family, column qualifier and value. Note that the sizes are uniformly
- * distributed between the minimum and maximum values. See {@link org.apache.accumulo.testing.stress.WriteOptions} for available options and default sizing
+ * The {@link org.apache.accumulo.testing.stress.Write} provides facilities for writing random sized
+ * cells. Users can configure minimum and maximum sized portions of a cell. The portions users can
+ * configure are the row, column family, column qualifier and value. Note that the sizes are
+ * uniformly distributed between the minimum and maximum values. See
+ * {@link org.apache.accumulo.testing.stress.WriteOptions} for available options and default sizing
* information.
*
- * The Scan provides users with the ability to query tables generated by the Write. It will pick a tablet at random and scan the entire range. The
- * amount of times this process is done is user configurable. By default, it happens 1,024 times. Users can also specify whether or not the scan should be
- * isolated or not.
+ * The Scan provides users with the ability to query tables generated by the Write. It will pick a
+ * tablet at random and scan the entire range. The amount of times this process is done is user
+ * configurable. By default, it happens 1,024 times. Users can also specify whether or not the scan
+ * should be isolated or not.
*
- * There is no shared state intended by either of these services. This allows multiple clients to be run in parallel, either on the same host or distributed
- * across hosts.
+ * There is no shared state intended by either of these services. This allows multiple clients to be
+ * run in parallel, either on the same host or distributed across hosts.
*/
package org.apache.accumulo.testing.stress;
-
diff --git a/src/test/java/org/apache/accumulo/testing/randomwalk/FrameworkTest.java b/src/test/java/org/apache/accumulo/testing/randomwalk/FrameworkTest.java
index de78794..d56eed5 100644
--- a/src/test/java/org/apache/accumulo/testing/randomwalk/FrameworkTest.java
+++ b/src/test/java/org/apache/accumulo/testing/randomwalk/FrameworkTest.java
@@ -39,8 +39,8 @@
// Need to use fully qualified name here because of conflict with
// org.apache.accumulo.testing.randomwalk.Test
@org.junit.Test
- public void testXML() throws SAXException, URISyntaxException, ParserConfigurationException,
- IOException {
+ public void testXML()
+ throws SAXException, URISyntaxException, ParserConfigurationException, IOException {
SchemaFactory sf = SchemaFactory.newInstance(XMLConstants.W3C_XML_SCHEMA_NS_URI);
Schema moduleSchema = sf.newSchema(getFile("/randomwalk/module.xsd"));
@@ -50,8 +50,8 @@
DocumentBuilder docbuilder = dbf.newDocumentBuilder();
Document document = docbuilder.parse(getFile("/randomwalk/modules/unit/Basic.xml"));
- assertNotEquals("Parsing randomwalk xml should result in nodes.", 0, document.getChildNodes()
- .getLength());
+ assertNotEquals("Parsing randomwalk xml should result in nodes.", 0,
+ document.getChildNodes().getLength());
}
private File getFile(String resource) throws URISyntaxException {