Update tracing example to use new OpenTelemetry tracing objects (#96)

Closes #88 
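
The example now follows the standard OpenTelemetry span lifecycle everywhere the old HTrace `Trace.startSpan(...)`/`TraceScope` pattern appeared. For reviewers, below is a minimal, self-contained sketch of that pattern as used in the diff; the class name `SpanPatternSketch` and the attribute value are illustrative only and are not part of the change:

```java
import io.opentelemetry.api.GlobalOpenTelemetry;
import io.opentelemetry.api.trace.Span;
import io.opentelemetry.api.trace.Tracer;
import io.opentelemetry.context.Scope;

// Illustrative sketch of the span lifecycle used throughout TracingExample.
public class SpanPatternSketch {
  public static void main(String[] args) {
    // The javaagent configured via ACCUMULO_JAVA_OPTS supplies the SDK at runtime;
    // without it, GlobalOpenTelemetry returns a no-op implementation.
    Tracer tracer = GlobalOpenTelemetry.get().getTracer("TracingExample");

    // Start a span and make it current so spans created by the Accumulo client
    // (and the server-side spans they trigger) nest under it.
    Span span = tracer.spanBuilder("trace example").startSpan();
    try (Scope scope = span.makeCurrent()) {
      span.addEvent("Initiating Flush");                 // replaces addTimelineAnnotation
      span.setAttribute("Number of Entries Read", 1L);   // replaces addKVAnnotation
    } finally {
      span.end();                                        // always report the span
    }
  }
}
```

Ending the span in `finally` mirrors the diff and ensures the span is reported even if an Accumulo operation throws.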
diff --git a/README.md b/README.md
index b853474..98d044a 100644
--- a/README.md
+++ b/README.md
@@ -87,6 +87,7 @@
 | [spark] | Using Accumulo as input and output for Apache Spark jobs |
 | [tabletofile] | Using MapReduce to read a table and write one of its columns to a file in HDFS. |
 | [terasort] | Generating random data and sorting it using Accumulo. |
+| [tracing] | Generating OpenTelemetry trace data in a client application and in Accumulo. |
 | [uniquecols] | Use MapReduce to count unique columns in Accumulo |
 | [visibility] | Using visibilities (or combinations of authorizations). Also shows user permissions. |
 | [wordcount] | Use MapReduce and Accumulo to do a word count on text files |
@@ -125,6 +126,7 @@
 [spark]: spark/README.md
 [tabletofile]: docs/tabletofile.md
 [terasort]: docs/terasort.md
+[tracing]: docs/tracing.md
 [uniquecols]: docs/uniquecols.md
 [visibility]: docs/visibility.md
 [wordcount]: docs/wordcount.md
diff --git a/bin/runex b/bin/runex
index e116fe2..e53f4aa 100755
--- a/bin/runex
+++ b/bin/runex
@@ -41,5 +41,5 @@
 "$ex_home"/bin/build
 
 export CLASSPATH="$EXAMPLES_JAR_PATH:$ex_home/conf:$ACCUMULO_HOME/lib/*:$CLASSPATH"
-java "org.apache.accumulo.examples.$1" ${*:2}
+java ${ACCUMULO_JAVA_OPTS} "org.apache.accumulo.examples.$1" ${*:2}
 
diff --git a/docs/tracing.md b/docs/tracing.md
new file mode 100644
index 0000000..71b8ab5
--- /dev/null
+++ b/docs/tracing.md
@@ -0,0 +1,41 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+# Apache Accumulo Tracing Example
+
+This tutorial uses the [TracingExample.java] class to create an OpenTelemetry span
+in the TracingExample application and then create and read entries from Accumulo.
+Tracing output should show up in the logs of the TracingExample client application
+as well as in the logs of the Accumulo server processes.
+
+## Setup
+
+  1. Download the OpenTelemetry [JavaAgent] jar file and place it into the `/path/to/accumulo/lib/` directory.
+  2. Add the property `general.opentelemetry.enabled=true` to `accumulo.properties`.
+  3. Export the following environment variables in the shell where you will run the example:
+  
+    export ACCUMULO_JAVA_OPTS="-javaagent:/path/to/accumulo/lib/opentelemetry-javaagent-1.12.1.jar"
+    export OTEL_TRACES_EXPORTER="logging"
+
+## Run the Example
+
+  1. Start Accumulo
+  2. Run the Tracing Example:
+  
+    $ ./bin/runex client.TracingExample --createtable --deletetable --create --read --table traceTest
+
+[JavaAgent]: https://search.maven.org/remotecontent?filepath=io/opentelemetry/javaagent/opentelemetry-javaagent/1.12.1/opentelemetry-javaagent-1.12.1.jar
+[TracingExample.java]: ../src/main/java/org/apache/accumulo/examples/client/TracingExample.java
diff --git a/pom.xml b/pom.xml
index a5747c3..3af185d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -31,8 +31,6 @@
     <accumulo.version>2.1.0-SNAPSHOT</accumulo.version>
     <eclipseFormatterStyle>contrib/Eclipse-Accumulo-Codestyle.xml</eclipseFormatterStyle>
     <hadoop.version>3.3.0</hadoop.version>
-    <htrace.hadoop.version>4.1.0-incubating</htrace.hadoop.version>
-    <htrace.version>3.2.0-incubating</htrace.version>
     <maven.compiler.release>11</maven.compiler.release>
     <maven.compiler.source>11</maven.compiler.source>
     <maven.compiler.target>11</maven.compiler.target>
@@ -51,11 +49,6 @@
         <type>pom</type>
         <scope>import</scope>
       </dependency>
-      <dependency>
-        <groupId>org.apache.htrace</groupId>
-        <artifactId>htrace-core</artifactId>
-        <version>${htrace.version}</version>
-      </dependency>
     </dependencies>
   </dependencyManagement>
   <dependencies>
@@ -68,6 +61,14 @@
       <artifactId>guava</artifactId>
     </dependency>
     <dependency>
+      <groupId>io.opentelemetry</groupId>
+      <artifactId>opentelemetry-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.opentelemetry</groupId>
+      <artifactId>opentelemetry-context</artifactId>
+    </dependency>
+    <dependency>
       <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-core</artifactId>
     </dependency>
@@ -80,10 +81,6 @@
       <artifactId>hadoop-client-api</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.apache.logging.log4j</groupId>
       <artifactId>log4j-1.2-api</artifactId>
     </dependency>
diff --git a/src/main/java/org/apache/accumulo/examples/client/TracingExample.java b/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
index 77c04ea..1a13f7a 100644
--- a/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
@@ -17,8 +17,7 @@
 
 package org.apache.accumulo.examples.client;
 
-import static java.nio.charset.StandardCharsets.UTF_8;
-
+import java.time.Instant;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.client.AccumuloClient;
@@ -32,18 +31,19 @@
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
-// import org.apache.accumulo.core.trace.DistributedTrace;
 import org.apache.accumulo.examples.Common;
 import org.apache.accumulo.examples.cli.ClientOnDefaultTable;
 import org.apache.accumulo.examples.cli.ScannerOpts;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.beust.jcommander.Parameter;
 
+import io.opentelemetry.api.GlobalOpenTelemetry;
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.api.trace.Tracer;
+import io.opentelemetry.context.Scope;
+
 /**
  * A simple example showing how to use the distributed tracing API in client code
  *
@@ -53,6 +53,7 @@
   private static final String DEFAULT_TABLE_NAME = "test";
 
   private final AccumuloClient client;
+  private final Tracer tracer;
 
   static class Opts extends ClientOnDefaultTable {
     @Parameter(names = {"--createtable"}, description = "create table before doing anything")
@@ -72,30 +73,33 @@
 
   private TracingExample(AccumuloClient client) {
     this.client = client;
-  }
-
-  private void enableTracing() {
-    // DistributedTrace.enable("myHost", "myApp");
+    this.tracer = GlobalOpenTelemetry.get().getTracer(TracingExample.class.getSimpleName());
   }
 
   private void execute(Opts opts) throws TableNotFoundException, AccumuloException,
       AccumuloSecurityException, TableExistsException {
 
-    if (opts.createtable) {
-      Common.createTableWithNamespace(client, opts.getTableName());
+    Span span = tracer.spanBuilder("trace example").startSpan();
+    try (Scope scope = span.makeCurrent()) {
+      if (opts.createtable) {
+        Common.createTableWithNamespace(client, opts.getTableName());
+      }
+
+      if (opts.createEntries) {
+        createEntries(opts);
+      }
+
+      if (opts.readEntries) {
+        readEntries(opts);
+      }
+
+      if (opts.deletetable) {
+        client.tableOperations().delete(opts.getTableName());
+      }
+    } finally {
+      span.end();
     }
 
-    if (opts.createEntries) {
-      createEntries(opts);
-    }
-
-    if (opts.readEntries) {
-      readEntries(opts);
-    }
-
-    if (opts.deletetable) {
-      client.tableOperations().delete(opts.getTableName());
-    }
   }
 
   private void createEntries(Opts opts) throws TableNotFoundException, AccumuloException {
@@ -104,41 +108,40 @@
     // the write operation as it is occurs asynchronously. You can optionally create additional
     // Spans
     // within a given Trace as seen below around the flush
-    TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS);
+    Span span = tracer.spanBuilder("createEntries").startSpan();
+    try (Scope scope = span.makeCurrent()) {
+      try (BatchWriter batchWriter = client.createBatchWriter(opts.getTableName())) {
+        Mutation m = new Mutation("row");
+        m.put("cf", "cq", "value");
 
-    System.out.println("TraceID: " + Long.toHexString(scope.getSpan().getTraceId()));
-    try (BatchWriter batchWriter = client.createBatchWriter(opts.getTableName())) {
-      Mutation m = new Mutation("row");
-      m.put("cf", "cq", "value");
-
-      batchWriter.addMutation(m);
-      // You can add timeline annotations to Spans which will be able to be viewed in the Monitor
-      scope.getSpan().addTimelineAnnotation("Initiating Flush");
-      batchWriter.flush();
+        batchWriter.addMutation(m);
+        // You can add events (timeline annotations) to Spans, which can be viewed in the Monitor
+        span.addEvent("Initiating Flush", Instant.now());
+        batchWriter.flush();
+      }
+    } finally {
+      span.end();
     }
-    scope.close();
+
   }
 
-  @SuppressWarnings("deprecation")
   private void readEntries(Opts opts) throws TableNotFoundException {
 
-    Scanner scanner = client.createScanner(opts.getTableName(), opts.auths);
-
-    // Trace the read operation.
-    TraceScope readScope = Trace.startSpan("Client Read", Sampler.ALWAYS);
-    System.out.println("TraceID: " + Long.toHexString(readScope.getSpan().getTraceId()));
-
-    int numberOfEntriesRead = 0;
-    for (Entry<Key,Value> entry : scanner) {
-      System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
-      ++numberOfEntriesRead;
+    try (Scanner scanner = client.createScanner(opts.getTableName(), opts.auths)) {
+      // Trace the read operation.
+      Span span = tracer.spanBuilder("readEntries").startSpan();
+      try (Scope scope = span.makeCurrent()) {
+        int numberOfEntriesRead = 0;
+        for (Entry<Key,Value> entry : scanner) {
+          System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
+          ++numberOfEntriesRead;
+        }
+        // You can add additional metadata (key/value pairs) to Spans as attributes
+        span.setAttribute("Number of Entries Read", numberOfEntriesRead);
+      } finally {
+        span.end();
+      }
     }
-    // You can add additional metadata (key, values) to Spans which will be able to be viewed in the
-    // Monitor
-    readScope.getSpan().addKVAnnotation("Number of Entries Read".getBytes(UTF_8),
-        String.valueOf(numberOfEntriesRead).getBytes(UTF_8));
-
-    readScope.close();
   }
 
   public static void main(String[] args) {
@@ -148,7 +151,6 @@
 
     try (AccumuloClient client = opts.createAccumuloClient()) {
       TracingExample tracingExample = new TracingExample(client);
-      tracingExample.enableTracing();
       tracingExample.execute(opts);
     } catch (Exception e) {
       log.error("Caught exception running TraceExample", e);