ACCUMULO-4804 Fixes to work with 2.0
diff --git a/docs/bulkIngest.md b/docs/bulkIngest.md
index 22bf07c..b856d83 100644
--- a/docs/bulkIngest.md
+++ b/docs/bulkIngest.md
@@ -24,10 +24,10 @@
 accumulo. Then we verify the 1000 rows are in accumulo.
 
     $ PKG=org.apache.accumulo.examples.mapreduce.bulk
-    $ ARGS="-i instance -z zookeepers -u username -p password"
+    $ ARGS="-c examples.conf"
     $ accumulo $PKG.SetupTable $ARGS -t test_bulk row_00000333 row_00000666
     $ accumulo $PKG.GenerateTestData --start-row 0 --count 1000 --output bulk/test_1.txt
-    $ accumulo-util hadoop-jar target/accumulo-examples.jar $PKG.BulkIngestExample $ARGS -t test_bulk --inputDir bulk --workDir tmp/bulkWork
+    $ accumulo-util hadoop-jar target/accumulo-examples-X.Y.Z.jar $PKG.BulkIngestExample $ARGS -t test_bulk --inputDir bulk --workDir tmp/bulkWork
     $ accumulo $PKG.VerifyIngest $ARGS -t test_bulk --start-row 0 --count 1000
 
 For a high level discussion of bulk ingest, see the docs dir.
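
The README commands above now read connection settings from a single client configuration file instead of the old `-i/-z/-u/-p` flags. A minimal sketch of what `examples.conf` might contain, assuming the standard Accumulo client configuration keys plus the two example-specific entries read by `ClientOpts` below (all values are placeholders for your cluster):

    instance.name=myinstance
    instance.zookeeper.host=localhost:2181
    accumulo.examples.principal=root
    accumulo.examples.password=secret
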
diff --git a/pom.xml b/pom.xml
index 834c761..3d426f3 100644
--- a/pom.xml
+++ b/pom.xml
@@ -33,7 +33,7 @@
   <description>Example code and corresponding documentation for using Apache Accumulo</description>
 
   <properties>
-    <accumulo.version>1.8.1</accumulo.version>
+    <accumulo.version>2.0.0-SNAPSHOT</accumulo.version>
     <hadoop.version>2.6.4</hadoop.version>
     <slf4j.version>1.7.21</slf4j.version>
     <maven.compiler.source>1.8</maven.compiler.source>
diff --git a/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java b/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java
index e08dfb8..4df1eae 100644
--- a/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java
+++ b/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java
@@ -16,6 +16,7 @@
  */
 package org.apache.accumulo.examples.cli;
 
+import java.io.File;
 import java.time.Duration;
 
 import org.apache.accumulo.core.client.AccumuloException;
@@ -85,12 +86,12 @@
     }
   }
 
-  public static class PropertiesConverter implements IStringConverter<Configuration> {
+  public static class PropertiesConverter implements IStringConverter<File> {
     @Override
-    public Configuration convert(String filename) {
+    public File convert(String filename) {
       try {
-        return new PropertiesConfiguration(filename);
-      } catch (ConfigurationException e) {
+        return new File(filename);
+      } catch (Exception e) {
         throw new RuntimeException(e);
       }
     }
@@ -98,14 +99,14 @@
 
   @Parameter(names = {"-c", "--conf"}, required = true, converter = PropertiesConverter.class,
       description = "Config file for connecting to Accumulo.  See README.md for details.")
-  private Configuration config = null;
+  private File config = null;
 
   @Parameter(names = {"-auths", "--auths"}, converter = AuthConverter.class, description = "the authorizations to use when reading or writing")
   public Authorizations auths = Authorizations.EMPTY;
 
   public Connector getConnector() {
     try {
-      ZooKeeperInstance zki = new ZooKeeperInstance(config);
+      ZooKeeperInstance zki = new ZooKeeperInstance(getClientConfiguration());
       return zki.getConnector(getPrincipal(), getToken());
     } catch (AccumuloException | AccumuloSecurityException e) {
       throw new RuntimeException(e);
@@ -113,14 +114,24 @@
   }
 
   public ClientConfiguration getClientConfiguration() {
-    return new ClientConfiguration(config);
+    return ClientConfiguration.fromFile(config);
   }
 
   public String getPrincipal() {
-    return config.getString("accumulo.examples.principal", "root");
+    String user = getClientConfiguration().getString("accumulo.examples.principal");
+    if (user != null) {
+      return user;
+    }
+    return "root";
   }
 
   public AuthenticationToken getToken() {
-    return new PasswordToken(config.getString("accumulo.examples.password", "secret"));
+    AuthenticationToken token = new PasswordToken("secret");
+    String password = getClientConfiguration().getString("accumulo.examples.password");
+    if (password != null) {
+      token = new PasswordToken(password);
+    }
+
+    return token;
   }
 }
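
With this change, `-c` is converted to a `File` and `getConnector()` builds the `ZooKeeperInstance` from `ClientConfiguration.fromFile`. A rough, hypothetical driver showing the options in use; the class and the direct JCommander parsing below are illustrative only and not part of this patch:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.examples.cli.ClientOpts;
    import com.beust.jcommander.JCommander;

    // Hypothetical harness; not part of the patch.
    public class ClientOptsUsage {
      public static void main(String[] args) throws Exception {
        ClientOpts opts = new ClientOpts();
        // e.g. args = {"-c", "examples.conf"}
        new JCommander(opts).parse(args);
        // ZooKeeperInstance is created from the client config file given to -c
        Connector conn = opts.getConnector();
        System.out.println(conn.whoami());
      }
    }
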
diff --git a/src/main/java/org/apache/accumulo/examples/reservations/ARS.java b/src/main/java/org/apache/accumulo/examples/reservations/ARS.java
index fb0277c..47dfe97 100644
--- a/src/main/java/org/apache/accumulo/examples/reservations/ARS.java
+++ b/src/main/java/org/apache/accumulo/examples/reservations/ARS.java
@@ -281,7 +281,7 @@
       } else if (tokens[0].equals("quit") && tokens.length == 1) {
         break;
       } else if (tokens[0].equals("connect") && tokens.length == 6 && ars == null) {
-        ZooKeeperInstance zki = new ZooKeeperInstance(new ClientConfiguration().withInstance(tokens[1]).withZkHosts(tokens[2]));
+        ZooKeeperInstance zki = new ZooKeeperInstance(ClientConfiguration.create().withInstance(tokens[1]).withZkHosts(tokens[2]));
         Connector conn = zki.getConnector(tokens[3], new PasswordToken(tokens[4]));
         if (conn.tableOperations().exists(tokens[5])) {
           ars = new ARS(conn, tokens[5]);
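
For reference, the interactive `connect` command feeding this branch still takes six tokens (instance, ZooKeeper hosts, user, password, table); a placeholder invocation inside the ARS shell might look like:

    connect myinstance localhost:2181 root secret ars
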
diff --git a/src/test/java/org/apache/accumulo/examples/ExamplesIT.java b/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
index a1e1b9d..7240304 100644
--- a/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
+++ b/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
@@ -124,6 +124,7 @@
   public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopConf) {
     // 128MB * 3
     cfg.setDefaultMemory(cfg.getDefaultMemory() * 3, MemoryUnit.BYTE);
+    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
   }
 
   @Before
diff --git a/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java b/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java
index e243f49..f080728 100644
--- a/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java
+++ b/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java
@@ -26,6 +26,7 @@
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
@@ -33,7 +34,9 @@
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.examples.cli.BatchWriterOpts;
 import org.apache.accumulo.examples.cli.ScannerOpts;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.junit.Before;
 import org.junit.Test;
@@ -43,6 +46,11 @@
   private Connector conn;
   private String tableName;
 
+  @Override
+  protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
+  }
+
   @Before
   public void setupInstance() throws Exception {
     tableName = getUniqueNames(1)[0];
diff --git a/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java b/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
index 55975b6..744b002 100644
--- a/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
+++ b/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
@@ -34,13 +34,16 @@
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.examples.ExamplesIT;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
 import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
@@ -51,6 +54,11 @@
     return 60;
   }
 
+  @Override
+  protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
+  }
+
   public static final String hadoopTmpDirArg = "-Dhadoop.tmp.dir=" + System.getProperty("user.dir") + "/target/hadoop-tmp";
 
   static final String tablename = "mapredf";