Fixes #10 - Update helloworld example to new Connector builder (#11)
* Example also now uses conf/accumulo-client.properties
* Added a log4j.properties file for examples
diff --git a/bin/runex b/bin/runex
index aa55ef3..fc273ad 100755
--- a/bin/runex
+++ b/bin/runex
@@ -22,4 +22,4 @@
av_arg="-Daccumulo.version=`accumulo version | tail -n 1`"
fi
-mvn -q exec:java -Dexec.mainClass="org.apache.accumulo.examples.$main_class" $av_arg -Dexec.args="$main_args"
+mvn -q exec:java -Dlog4j.configuration="file:./conf/log4j.properties" -Dexec.mainClass="org.apache.accumulo.examples.$main_class" $av_arg -Dexec.args="$main_args"
diff --git a/conf/accumulo-client.properties b/conf/accumulo-client.properties
new file mode 100644
index 0000000..eb8757d
--- /dev/null
+++ b/conf/accumulo-client.properties
@@ -0,0 +1,120 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+################################
+## Accumulo client configuration
+################################
+
+## NOTE - All properties that have a default are set with it. Properties that
+## are uncommented must be set by the user.
+
+## Instance properties
+## --------------
+## Name of Accumulo instance to connect to
+instance.name=
+
+## Zookeeper connection information for Accumulo instance
+instance.zookeepers=localhost:2181
+
+## Zookeeper session timeout (in seconds)
+#instance.zookeepers.timeout.sec=30
+
+
+## Authentication properties
+## --------------
+## Authentication method (i.e. password, kerberos, provider). Set more properties for the chosen method below.
+auth.method=password
+
+## Accumulo username/principal for chosen authentication method
+auth.username=
+
+## Path to Kerberos keytab
+#auth.kerberos.keytab.path=
+
+## Accumulo user password
+auth.password=
+
+## Alias used to extract Accumulo user password from CredentialProvider
+#auth.provider.name=
+
+## Comma separated list of URLs defining CredentialProvider(s)
+#auth.provider.urls=
+
+
+## Batch Writer properties
+## --------------
+## Change the durability for the BatchWriter session. Use "default" to use the table's durability setting.
+#batch.writer.durability=default
+
+## Max amount of time (in seconds) to hold data in memory before flushing it
+#batch.writer.max.latency.sec=120
+
+## Max memory (in bytes) to batch before writing
+#batch.writer.max.memory.bytes=52428800
+
+## Max amount of time (in seconds) an unresponsive server will be re-tried. An exception is thrown when this timeout is exceeded. Set to zero for no timeout.
+#batch.writer.max.timeout.sec=0
+
+## Maximum number of threads to use for writing data to tablet servers.
+#batch.writer.max.write.threads=3
+
+
+## SSL properties
+## --------------
+## Enable SSL for client RPC
+#ssl.enabled=false
+
+## Password used to encrypt keystore
+#ssl.keystore.password=
+
+## Path to SSL keystore file
+#ssl.keystore.path=
+
+## Type of SSL keystore
+#ssl.keystore.type=jks
+
+## Password used to encrypt truststore
+#ssl.truststore.password=
+
+## Path to SSL truststore file
+#ssl.truststore.path=
+
+## Type of SSL truststore
+#ssl.truststore.type=jks
+
+## Use JSSE system properties to configure SSL
+#ssl.use.jsse=false
+
+
+## SASL properties
+## --------------
+## Enable SASL for client RPC
+#sasl.enabled=false
+
+## Kerberos principal/primary that Accumulo servers use to login
+#sasl.kerberos.server.primary=accumulo
+
+## SASL quality of protection. Valid values are 'auth', 'auth-int', and 'auth-conf'
+#sasl.qop=auth
+
+
+## Tracing properties
+## --------------
+## A list of span receiver classes to send trace spans
+#trace.span.receivers=org.apache.accumulo.tracer.ZooTraceClient
+
+## The zookeeper node where tracers are registered
+#trace.zookeeper.path=/tracers
+
diff --git a/conf/log4j.properties b/conf/log4j.properties
new file mode 100644
index 0000000..3cc3900
--- /dev/null
+++ b/conf/log4j.properties
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+log4j.rootLogger=INFO, CA
+log4j.appender.CA=org.apache.log4j.ConsoleAppender
+log4j.appender.CA.layout=org.apache.log4j.PatternLayout
+log4j.appender.CA.layout.ConversionPattern=%d{ISO8601} [%c{3}] %-5p: %m%n
+
+log4j.logger.org.apache.accumulo=INFO
+log4j.logger.org.apache.accumulo.examples=TRACE
+log4j.logger.org.apache.curator=ERROR
+log4j.logger.org.apache.hadoop=WARN
+log4j.logger.org.apache.hadoop.mapreduce=ERROR
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.zookeeper=ERROR
diff --git a/docs/helloworld.md b/docs/helloworld.md
index 04b9f98..eb75991 100644
--- a/docs/helloworld.md
+++ b/docs/helloworld.md
@@ -19,32 +19,24 @@
This tutorial uses the following Java classes:
* [InsertWithBatchWriter.java] - Inserts 10K rows (50K entries) into accumulo with each row having 5 entries
- * [ReadData.java] - Reads all data between two rows
+ * [ReadData.java] - Reads data between two rows
-Log into the accumulo shell:
+Inserts data with a BatchWriter:
- $ accumulo shell -u username -p password
-
-Create a table called 'hellotable':
-
- username@instance> createtable hellotable
-
-Launch a Java program that inserts data with a BatchWriter:
-
- $ ./bin/runex helloworld.InsertWithBatchWriter -c ./examples.conf -t hellotable
+ $ ./bin/runex helloworld.InsertWithBatchWriter
On the accumulo status page at the URL below (where 'master' is replaced with the name or IP of your accumulo master), you should see 50K entries
http://master:9995/
-To view the entries, use the shell to scan the table:
+To view the entries, use the shell (run `accumulo shell -u username -p password` to access it) to scan the table:
username@instance> table hellotable
username@instance hellotable> scan
You can also use a Java class to scan the table:
- $ ./bin/runex helloworld.ReadData -c ./examples.conf -t hellotable --startKey row_0 --endKey row_1001
+ $ ./bin/runex helloworld.ReadData
[InsertWithBatchWriter.java]: ../src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java
[ReadData.java]: ../src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java
diff --git a/src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java b/src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java
index f61063d..dfe464e 100644
--- a/src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java
+++ b/src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java
@@ -20,46 +20,41 @@
import org.apache.accumulo.core.client.AccumuloSecurityException;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MultiTableBatchWriter;
-import org.apache.accumulo.core.client.MutationsRejectedException;
import org.apache.accumulo.core.client.TableExistsException;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.examples.cli.BatchWriterOpts;
-import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
-import org.apache.hadoop.io.Text;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Inserts 10K rows (50K entries) into accumulo with each row having 5 entries.
*/
public class InsertWithBatchWriter {
- public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, MutationsRejectedException, TableExistsException,
- TableNotFoundException {
- ClientOnRequiredTable opts = new ClientOnRequiredTable();
- BatchWriterOpts bwOpts = new BatchWriterOpts();
- opts.parseArgs(InsertWithBatchWriter.class.getName(), args, bwOpts);
+ private static final Logger log = LoggerFactory.getLogger(InsertWithBatchWriter.class);
- Connector connector = opts.getConnector();
- MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(bwOpts.getBatchWriterConfig());
+ public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
- if (!connector.tableOperations().exists(opts.getTableName()))
- connector.tableOperations().create(opts.getTableName());
- BatchWriter bw = mtbw.getBatchWriter(opts.getTableName());
-
- Text colf = new Text("colfam");
- System.out.println("writing ...");
- for (int i = 0; i < 10000; i++) {
- Mutation m = new Mutation(new Text(String.format("row_%d", i)));
- for (int j = 0; j < 5; j++) {
- m.put(colf, new Text(String.format("colqual_%d", j)), new Value((String.format("value_%d_%d", i, j)).getBytes()));
- }
- bw.addMutation(m);
- if (i % 100 == 0)
- System.out.println(i);
+ Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
+ try {
+ connector.tableOperations().create("hellotable");
+ } catch (TableExistsException e) {
+ // ignore
}
- mtbw.close();
- }
+ try (BatchWriter bw = connector.createBatchWriter("hellotable")) {
+ log.trace("writing ...");
+ for (int i = 0; i < 10000; i++) {
+ Mutation m = new Mutation(String.format("row_%d", i));
+ for (int j = 0; j < 5; j++) {
+ m.put("colfam", String.format("colqual_%d", j), new Value((String.format("value_%d_%d", i, j)).getBytes()));
+ }
+ bw.addMutation(m);
+ if (i % 100 == 0) {
+ log.trace(String.valueOf(i));
+ }
+ }
+ }
+ }
}
diff --git a/src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java b/src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java
index 9237e5a..da5baf8 100644
--- a/src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java
+++ b/src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java
@@ -16,7 +16,6 @@
*/
package org.apache.accumulo.examples.helloworld;
-import java.util.Iterator;
import java.util.Map.Entry;
import org.apache.accumulo.core.client.AccumuloException;
@@ -27,52 +26,27 @@
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.examples.cli.ClientOnRequiredTable;
-import org.apache.accumulo.examples.cli.ScannerOpts;
-import org.apache.hadoop.io.Text;
+import org.apache.accumulo.core.security.Authorizations;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.beust.jcommander.Parameter;
-
/**
- * Reads all data between two rows; all data after a given row; or all data in a table, depending on the number of arguments given.
+ * Reads all data between two rows
*/
public class ReadData {
private static final Logger log = LoggerFactory.getLogger(ReadData.class);
- static class Opts extends ClientOnRequiredTable {
- @Parameter(names = "--startKey")
- String startKey;
- @Parameter(names = "--endKey")
- String endKey;
- }
-
public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
- Opts opts = new Opts();
- ScannerOpts scanOpts = new ScannerOpts();
- opts.parseArgs(ReadData.class.getName(), args, scanOpts);
- Connector connector = opts.getConnector();
+ Connector connector = Connector.builder().usingProperties("conf/accumulo-client.properties").build();
- Scanner scan = connector.createScanner(opts.getTableName(), opts.auths);
- scan.setBatchSize(scanOpts.scanBatchSize);
- Key start = null;
- if (opts.startKey != null)
- start = new Key(new Text(opts.startKey));
- Key end = null;
- if (opts.endKey != null)
- end = new Key(new Text(opts.endKey));
- scan.setRange(new Range(start, end));
- Iterator<Entry<Key,Value>> iter = scan.iterator();
-
- while (iter.hasNext()) {
- Entry<Key,Value> e = iter.next();
- Text colf = e.getKey().getColumnFamily();
- Text colq = e.getKey().getColumnQualifier();
- log.trace("row: " + e.getKey().getRow() + ", colf: " + colf + ", colq: " + colq);
- log.trace(", value: " + e.getValue().toString());
+ try (Scanner scan = connector.createScanner("hellotable", Authorizations.EMPTY)) {
+ scan.setRange(new Range(new Key("row_0"), new Key("row_1002")));
+ for (Entry<Key, Value> e : scan) {
+ Key key = e.getKey();
+ log.trace(key.getRow() + " " + key.getColumnFamily() + " " + key.getColumnQualifier() + " " + e.getValue());
+ }
}
}
}