Pre-split tables at creation time in tests (#1892)

Convert tests that pre-split tables to use NewTableConfiguration, so that splits
are applied when the table is created rather than added afterwards with addSplits.
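
The pattern is the same in every converted test: rather than creating the table and then
issuing separate setProperty and addSplits calls, the splits and table properties are bundled
into a NewTableConfiguration that is passed to a single create call. Below is a minimal sketch
of the before/after shape, using a hypothetical helper method, client variable, and example
split points; the API calls it relies on (withSplits, setProperties, and create(tableName, ntc))
are the same ones used in the diff.

    import java.util.HashMap;
    import java.util.SortedSet;
    import java.util.TreeSet;

    import org.apache.accumulo.core.client.AccumuloClient;
    import org.apache.accumulo.core.client.AccumuloException;
    import org.apache.accumulo.core.client.AccumuloSecurityException;
    import org.apache.accumulo.core.client.TableExistsException;
    import org.apache.accumulo.core.client.admin.NewTableConfiguration;
    import org.apache.accumulo.core.conf.Property;
    import org.apache.hadoop.io.Text;

    public class PreSplitExample {
      // Hypothetical helper; the method name and split values are illustrative only.
      static void createPreSplitTable(AccumuloClient client, String tableName)
          throws AccumuloException, AccumuloSecurityException, TableExistsException {

        // Before: create first, then configure and split in follow-up calls
        //   client.tableOperations().create(tableName);
        //   client.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1");
        //   client.tableOperations().addSplits(tableName, splits);

        // After: supply splits and properties at creation time
        SortedSet<Text> splits = new TreeSet<>();
        for (String s : "d m t".split(" ")) { // example split points
          splits.add(new Text(s));
        }
        HashMap<String,String> props = new HashMap<>();
        props.put(Property.TABLE_MAJC_RATIO.getKey(), "1");
        NewTableConfiguration ntc =
            new NewTableConfiguration().setProperties(props).withSplits(splits);
        client.tableOperations().create(tableName, ntc);
      }
    }

Bundling the splits into the create call also removes the separate addSplits step, which in
TestIngest previously needed its own TableNotFoundException handling when the table already
existed.
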
diff --git a/test/src/main/java/org/apache/accumulo/test/BulkImportMonitoringIT.java b/test/src/main/java/org/apache/accumulo/test/BulkImportMonitoringIT.java
index acaa4ea..6c88bb1 100644
--- a/test/src/main/java/org/apache/accumulo/test/BulkImportMonitoringIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/BulkImportMonitoringIT.java
@@ -22,6 +22,7 @@
 import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.SortedSet;
 import java.util.TreeSet;
@@ -32,6 +33,7 @@
 
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.crypto.CryptoServiceFactory;
@@ -65,15 +67,20 @@
   public void test() throws Exception {
     getCluster().getClusterControl().start(ServerType.MONITOR);
     try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
+
+      // get a unique table name
       final String tableName = getUniqueNames(1)[0];
-      c.tableOperations().create(tableName);
-      c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1");
-      // splits to slow down bulk import
+      // splits to slow down bulk import
       SortedSet<Text> splits = new TreeSet<>();
       for (int i = 1; i < 0xf; i++) {
         splits.add(new Text(Integer.toHexString(i)));
       }
-      c.tableOperations().addSplits(tableName, splits);
+      // creating properties
+      HashMap<String,String> props = new HashMap<>();
+      props.put(Property.TABLE_MAJC_RATIO.getKey(), "1");
+      // creating table with configuration
+      var ntc = new NewTableConfiguration().setProperties(props).withSplits(splits);
+      c.tableOperations().create(tableName, ntc);
 
       MasterMonitorInfo stats = getCluster().getMasterMonitorInfo();
       assertEquals(1, stats.tServerInfo.size());
diff --git a/test/src/main/java/org/apache/accumulo/test/CompactionExecutorIT.java b/test/src/main/java/org/apache/accumulo/test/CompactionExecutorIT.java
index 749a791..45f5c56 100644
--- a/test/src/main/java/org/apache/accumulo/test/CompactionExecutorIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CompactionExecutorIT.java
@@ -403,13 +403,13 @@
     String tableName = "tiwr";
 
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
-      client.tableOperations().create(tableName);
+
       SortedSet<Text> splits = new TreeSet<>();
-      splits.add(new Text("f"));
-      splits.add(new Text("m"));
-      splits.add(new Text("r"));
-      splits.add(new Text("t"));
-      client.tableOperations().addSplits(tableName, splits);
+      for (String s : List.of("f", "m", "r", "t"))
+        splits.add(new Text(s));
+
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
+      client.tableOperations().create(tableName, ntc);
 
       Map<String,String> expected = new TreeMap<>();
 
diff --git a/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java b/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
index b409430..0208a52 100644
--- a/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
@@ -874,8 +874,8 @@
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
 
-      client.tableOperations().create(tableName);
-      client.tableOperations().addSplits(tableName, nss("2", "4", "6"));
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(nss("2", "4", "6"));
+      client.tableOperations().create(tableName, ntc);
 
       sleepUninterruptibly(2, TimeUnit.SECONDS);
 
@@ -1223,19 +1223,21 @@
     String tableName = getUniqueNames(1)[0];
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
 
-      client.tableOperations().create(tableName);
+      NewTableConfiguration ntc = new NewTableConfiguration();
 
       Random rand = new SecureRandom();
 
       switch (rand.nextInt(3)) {
         case 1:
-          client.tableOperations().addSplits(tableName, nss("4"));
+          ntc = ntc.withSplits(nss("4"));
           break;
         case 2:
-          client.tableOperations().addSplits(tableName, nss("3", "5"));
+          ntc = ntc.withSplits(nss("3", "5"));
           break;
       }
 
+      client.tableOperations().create(tableName, ntc);
+
       try (ConditionalWriter cw =
           client.createConditionalWriter(tableName, new ConditionalWriterConfig())) {
 
diff --git a/test/src/main/java/org/apache/accumulo/test/CountNameNodeOpsBulkIT.java b/test/src/main/java/org/apache/accumulo/test/CountNameNodeOpsBulkIT.java
index e449a04..c7163ba 100644
--- a/test/src/main/java/org/apache/accumulo/test/CountNameNodeOpsBulkIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CountNameNodeOpsBulkIT.java
@@ -35,6 +35,7 @@
 
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.crypto.CryptoServiceFactory;
@@ -97,17 +98,20 @@
   public void compareOldNewBulkImportTest() throws Exception {
     try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
       getCluster().getClusterControl().kill(ServerType.GARBAGE_COLLECTOR, "localhost");
+
       final String tableName = getUniqueNames(1)[0];
-      c.tableOperations().create(tableName);
-      // turn off compactions
-      c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "2000");
-      c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "2000");
+      // disable compactions
+      Map<String,String> props = new HashMap<>();
+      props.put(Property.TABLE_MAJC_RATIO.getKey(), "2000");
+      props.put(Property.TABLE_FILE_MAX.getKey(), "2000");
       // splits to slow down bulk import
       SortedSet<Text> splits = new TreeSet<>();
       for (int i = 1; i < 0xf; i++) {
         splits.add(new Text(Integer.toHexString(i)));
       }
-      c.tableOperations().addSplits(tableName, splits);
+
+      var ntc = new NewTableConfiguration().setProperties(props).withSplits(splits);
+      c.tableOperations().create(tableName, ntc);
 
       MasterMonitorInfo stats = getCluster().getMasterMonitorInfo();
       assertEquals(1, stats.tServerInfo.size());
diff --git a/test/src/main/java/org/apache/accumulo/test/MasterRepairsDualAssignmentIT.java b/test/src/main/java/org/apache/accumulo/test/MasterRepairsDualAssignmentIT.java
index 011d3d7..b0a6354 100644
--- a/test/src/main/java/org/apache/accumulo/test/MasterRepairsDualAssignmentIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/MasterRepairsDualAssignmentIT.java
@@ -28,6 +28,7 @@
 
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.clientImpl.ClientContext;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.dataImpl.KeyExtent;
@@ -79,12 +80,12 @@
       c.securityOperations().grantTablePermission("root", MetadataTable.NAME,
           TablePermission.WRITE);
       c.securityOperations().grantTablePermission("root", RootTable.NAME, TablePermission.WRITE);
-      c.tableOperations().create(table);
       SortedSet<Text> partitions = new TreeSet<>();
       for (String part : "a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")) {
         partitions.add(new Text(part));
       }
-      c.tableOperations().addSplits(table, partitions);
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(partitions);
+      c.tableOperations().create(table, ntc);
       // scan the metadata table and get the two table location states
       Set<TServerInstance> states = new HashSet<>();
       Set<TabletLocationState> oldLocations = new HashSet<>();
diff --git a/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java b/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
index 67ad71f..56e8124 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestBinaryRows.java
@@ -30,6 +30,7 @@
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
@@ -208,8 +209,8 @@
         System.out.printf("added split point 0x%016x  %,12d%n", splitPoint, splitPoint);
       }
 
-      accumuloClient.tableOperations().create(opts.tableName);
-      accumuloClient.tableOperations().addSplits(opts.tableName, splits);
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
+      accumuloClient.tableOperations().create(opts.tableName, ntc);
 
     } else {
       throw new Exception("ERROR : " + opts.mode + " is not a valid operation.");
diff --git a/test/src/main/java/org/apache/accumulo/test/TestIngest.java b/test/src/main/java/org/apache/accumulo/test/TestIngest.java
index f706a77..d253ad2 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestIngest.java
@@ -36,6 +36,7 @@
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableExistsException;
 import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.client.security.SecurityErrorCode;
 import org.apache.accumulo.core.clientImpl.ClientContext;
 import org.apache.accumulo.core.conf.ClientProperty;
@@ -169,15 +170,17 @@
     if (params.createTable) {
       TreeSet<Text> splits =
           getSplitPoints(params.startRow, params.startRow + params.rows, params.numsplits);
-
+      // if the table does not exist, create it (with splits)
       if (!client.tableOperations().exists(params.tableName)) {
-        client.tableOperations().create(params.tableName);
-      }
-      try {
-        client.tableOperations().addSplits(params.tableName, splits);
-      } catch (TableNotFoundException ex) {
-        // unlikely
-        throw new RuntimeException(ex);
+        NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
+        client.tableOperations().create(params.tableName, ntc);
+      } else { // if the table already exists, add splits to it
+        try {
+          client.tableOperations().addSplits(params.tableName, splits);
+        } catch (TableNotFoundException ex) {
+          // unlikely
+          throw new RuntimeException(ex);
+        }
       }
     }
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/UserCompactionStrategyIT.java b/test/src/main/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
index b8dbe11..a4b8b1e 100644
--- a/test/src/main/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
@@ -27,11 +27,13 @@
 import java.io.IOException;
 import java.security.SecureRandom;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
+import java.util.SortedSet;
 import java.util.TreeSet;
 
 import org.apache.accumulo.core.client.Accumulo;
@@ -43,6 +45,7 @@
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.admin.CompactionConfig;
 import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.clientImpl.ClientContext;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
@@ -161,13 +164,13 @@
       File target = new File(System.getProperty("user.dir"), "target");
       assertTrue(target.mkdirs() || target.isDirectory());
       File destFile = installJar(target, "/TestCompactionStrat.jar");
-      c.tableOperations().create(tableName);
       c.instanceOperations().setProperty(
           Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "context1", destFile.toString());
-      c.tableOperations().setProperty(tableName, Property.TABLE_CLASSLOADER_CONTEXT.getKey(),
-          "context1");
-
-      c.tableOperations().addSplits(tableName, new TreeSet<>(Arrays.asList(new Text("efg"))));
+      HashMap<String,String> props = new HashMap<>();
+      props.put(Property.TABLE_CLASSLOADER_CONTEXT.getKey(), "context1");
+      SortedSet<Text> splits = new TreeSet<>(Arrays.asList(new Text("efg")));
+      var ntc = new NewTableConfiguration().setProperties(props).withSplits(splits);
+      c.tableOperations().create(tableName, ntc);
 
       writeFlush(c, tableName, "a");
       writeFlush(c, tableName, "b");
diff --git a/test/src/main/java/org/apache/accumulo/test/VerifySerialRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/VerifySerialRecoveryIT.java
index 08dda8a..d4511ef 100644
--- a/test/src/main/java/org/apache/accumulo/test/VerifySerialRecoveryIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/VerifySerialRecoveryIT.java
@@ -30,6 +30,7 @@
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.security.Authorizations;
@@ -83,12 +84,17 @@
     // make a table with many splits
     String tableName = getUniqueNames(1)[0];
     try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
-      c.tableOperations().create(tableName);
+
+      // create splits
       SortedSet<Text> splits = new TreeSet<>();
       for (int i = 0; i < 200; i++) {
         splits.add(new Text(randomHex(8)));
       }
-      c.tableOperations().addSplits(tableName, splits);
+
+      // create table with config
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
+      c.tableOperations().create(tableName, ntc);
+
       // load data to give the recovery something to do
       try (BatchWriter bw = c.createBatchWriter(tableName)) {
         for (int i = 0; i < 50000; i++) {
diff --git a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
index e7e5565..37081b9 100644
--- a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
@@ -48,6 +48,7 @@
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.admin.CompactionConfig;
 import org.apache.accumulo.core.client.admin.DiskUsage;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.clientImpl.ClientContext;
 import org.apache.accumulo.core.conf.ClientProperty;
@@ -126,12 +127,13 @@
     // create a table
     try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
       String tableName = getUniqueNames(1)[0];
-      client.tableOperations().create(tableName);
+      // create set of splits
       SortedSet<Text> partitions = new TreeSet<>();
-      // with some splits
       for (String s : "d,m,t".split(","))
         partitions.add(new Text(s));
-      client.tableOperations().addSplits(tableName, partitions);
+      // create table with splits
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(partitions);
+      client.tableOperations().create(tableName, ntc);
       // scribble over the splits
       VolumeChooserIT.writeDataToTable(client, tableName, VolumeChooserIT.alpha_rows);
       // write the data to disk, read it back
@@ -256,13 +258,14 @@
 
   private void writeData(String tableName, AccumuloClient client) throws AccumuloException,
       AccumuloSecurityException, TableExistsException, TableNotFoundException {
+
     TreeSet<Text> splits = new TreeSet<>();
     for (int i = 1; i < 100; i++) {
       splits.add(new Text(String.format("%06d", i * 100)));
     }
 
-    client.tableOperations().create(tableName);
-    client.tableOperations().addSplits(tableName, splits);
+    NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
+    client.tableOperations().create(tableName, ntc);
 
     try (BatchWriter bw = client.createBatchWriter(tableName)) {
       for (int i = 0; i < 100; i++) {
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BinaryIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BinaryIT.java
index dec342f..86c4cd7 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BinaryIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BinaryIT.java
@@ -23,6 +23,7 @@
 
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.accumulo.test.TestBinaryRows;
 import org.apache.hadoop.io.Text;
@@ -48,11 +49,11 @@
   public void testPreSplit() throws Exception {
     String tableName = getUniqueNames(1)[0];
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
-      c.tableOperations().create(tableName);
       SortedSet<Text> splits = new TreeSet<>();
       splits.add(new Text("8"));
       splits.add(new Text("256"));
-      c.tableOperations().addSplits(tableName, splits);
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
+      c.tableOperations().create(tableName, ntc);
       runTest(c, tableName);
     }
   }
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BulkOldIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BulkOldIT.java
index 304bbf2..4aca716 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BulkOldIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BulkOldIT.java
@@ -27,6 +27,7 @@
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.crypto.CryptoServiceFactory;
 import org.apache.accumulo.core.data.Key;
@@ -66,11 +67,11 @@
   public void testBulkFile() throws Exception {
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
-      c.tableOperations().create(tableName);
       SortedSet<Text> splits = new TreeSet<>();
       for (String split : "0333 0666 0999 1333 1666".split(" "))
         splits.add(new Text(split));
-      c.tableOperations().addSplits(tableName, splits);
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
+      c.tableOperations().create(tableName, ntc);
       Configuration conf = new Configuration();
       AccumuloConfiguration aconf = getCluster().getServerContext().getConfiguration();
       FileSystem fs = getCluster().getFileSystem();
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
index a78cbff..88df94e 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
@@ -43,6 +43,7 @@
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.admin.CloneConfiguration;
 import org.apache.accumulo.core.client.admin.DiskUsage;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.clientImpl.ClientContext;
 import org.apache.accumulo.core.clientImpl.Tables;
 import org.apache.accumulo.core.conf.Property;
@@ -310,9 +311,8 @@
 
       String[] tables = getUniqueNames(2);
 
-      client.tableOperations().create(tables[0]);
-
-      client.tableOperations().addSplits(tables[0], splits);
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
+      client.tableOperations().create(tables[0], ntc);
 
       try (BatchWriter bw = client.createBatchWriter(tables[0])) {
         bw.addMutations(mutations);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ConcurrentDeleteTableIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ConcurrentDeleteTableIT.java
index 0d489fe..f02acbd 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ConcurrentDeleteTableIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ConcurrentDeleteTableIT.java
@@ -43,6 +43,7 @@
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.TableOfflineException;
 import org.apache.accumulo.core.client.admin.CompactionConfig;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.clientImpl.ClientContext;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.security.Authorizations;
@@ -60,16 +61,17 @@
   @Test
   public void testConcurrentDeleteTablesOps() throws Exception {
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
+
       String[] tables = getUniqueNames(2);
 
       TreeSet<Text> splits = createSplits();
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
 
       ExecutorService es = Executors.newFixedThreadPool(20);
 
       int count = 0;
       for (final String table : tables) {
-        c.tableOperations().create(table);
-        c.tableOperations().addSplits(table, splits);
+        c.tableOperations().create(table, ntc);
         writeData(c, table);
         if (count == 1) {
           c.tableOperations().flush(table, null, null, true);
@@ -161,6 +163,7 @@
       String[] tables = getUniqueNames(2);
 
       TreeSet<Text> splits = createSplits();
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
 
       int numOperations = 8;
 
@@ -168,8 +171,7 @@
 
       int count = 0;
       for (final String table : tables) {
-        c.tableOperations().create(table);
-        c.tableOperations().addSplits(table, splits);
+        c.tableOperations().create(table, ntc);
         writeData(c, table);
         if (count == 1) {
           c.tableOperations().flush(table, null, null, true);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CreateAndUseIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
index eccd59f..d918e29 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
@@ -31,6 +31,7 @@
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
@@ -50,15 +51,17 @@
     return 4 * 60;
   }
 
-  private static SortedSet<Text> splits;
+  private static NewTableConfiguration ntc;
 
   @BeforeClass
   public static void createData() {
-    splits = new TreeSet<>();
+    SortedSet<Text> splits = new TreeSet<>();
 
     for (int i = 1; i < 256; i++) {
       splits.add(new Text(String.format("%08x", i << 8)));
     }
+
+    ntc = new NewTableConfiguration().withSplits(splits);
   }
 
   @Test
@@ -70,8 +73,8 @@
       Text cq = new Text("cq1");
 
       String tableName = getUniqueNames(1)[0];
-      client.tableOperations().create(tableName);
-      client.tableOperations().addSplits(tableName, splits);
+      client.tableOperations().create(tableName, ntc);
+
       try (BatchWriter bw = client.createBatchWriter(tableName)) {
         for (int i = 1; i < 257; i++) {
           Mutation m = new Mutation(new Text(String.format("%08x", (i << 8) - 16)));
@@ -98,9 +101,10 @@
   @Test
   public void createTableAndScan() throws Exception {
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
+
       String table2 = getUniqueNames(1)[0];
-      client.tableOperations().create(table2);
-      client.tableOperations().addSplits(table2, splits);
+      client.tableOperations().create(table2, ntc);
+
       try (Scanner scanner2 = client.createScanner(table2, Authorizations.EMPTY)) {
         int count = 0;
         for (Entry<Key,Value> entry : scanner2) {
@@ -124,8 +128,8 @@
       }
 
       String table3 = getUniqueNames(1)[0];
-      client.tableOperations().create(table3);
-      client.tableOperations().addSplits(table3, splits);
+      client.tableOperations().create(table3, ntc);
+
       try (BatchScanner bs = client.createBatchScanner(table3)) {
         bs.setRanges(ranges);
         Iterator<Entry<Key,Value>> iter = bs.iterator();
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/FateStarvationIT.java b/test/src/main/java/org/apache/accumulo/test/functional/FateStarvationIT.java
index c7d0e20..c9ac574 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/FateStarvationIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/FateStarvationIT.java
@@ -25,6 +25,7 @@
 
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.clientImpl.ClientContext;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.accumulo.test.TestIngest;
@@ -46,9 +47,8 @@
   public void run() throws Exception {
     String tableName = getUniqueNames(1)[0];
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
-      c.tableOperations().create(tableName);
-
-      c.tableOperations().addSplits(tableName, TestIngest.getSplitPoints(0, 100000, 50));
+      var ntc = new NewTableConfiguration().withSplits(TestIngest.getSplitPoints(0, 100000, 50));
+      c.tableOperations().create(tableName, ntc);
 
       IngestParams params = new IngestParams(getClientProps(), tableName, 100_000);
       params.random = 89;
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
index 952fcd0..7031c62 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
@@ -94,14 +94,12 @@
   private void runMergeTest(AccumuloClient client, String table, String[] splits, String[] inserts,
       String start, String end, String last, long expected) throws Exception {
     log.info("table {}", table);
-    client.tableOperations().create(table,
-        new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
     TreeSet<Text> splitSet = new TreeSet<>();
     for (String split : splits) {
       splitSet.add(new Text(split));
     }
-    client.tableOperations().addSplits(table, splitSet);
-
+    client.tableOperations().create(table,
+        new NewTableConfiguration().setTimeType(TimeType.LOGICAL).withSplits(splitSet));
     BatchWriter bw = client.createBatchWriter(table);
     for (String row : inserts) {
       Mutation m = new Mutation(row);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ManyWriteAheadLogsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ManyWriteAheadLogsIT.java
index 9f1a8bc..bffe532 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ManyWriteAheadLogsIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ManyWriteAheadLogsIT.java
@@ -34,6 +34,7 @@
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.admin.InstanceOperations;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
@@ -129,8 +130,9 @@
 
       String manyWALsTable = tableNames[0];
       String rollWALsTable = tableNames[1];
-      c.tableOperations().create(manyWALsTable);
-      c.tableOperations().addSplits(manyWALsTable, splits);
+
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
+      c.tableOperations().create(manyWALsTable, ntc);
 
       c.tableOperations().create(rollWALsTable);
 
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenIT.java
index d2f3fd2..b406d96 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenIT.java
@@ -19,6 +19,7 @@
 package org.apache.accumulo.test.functional;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -28,6 +29,7 @@
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.admin.InstanceOperations;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Range;
@@ -101,10 +103,11 @@
   public void run() throws Exception {
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
       final String tableName = getUniqueNames(1)[0];
-      c.tableOperations().create(tableName);
-      c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "10");
-      c.tableOperations().addSplits(tableName,
-          TestIngest.getSplitPoints(0, NUM_TO_INGEST, NUM_TABLETS));
+      HashMap<String,String> props = new HashMap<>();
+      props.put(Property.TABLE_MAJC_RATIO.getKey(), "10");
+      NewTableConfiguration ntc = new NewTableConfiguration().setProperties(props)
+          .withSplits(TestIngest.getSplitPoints(0, NUM_TO_INGEST, NUM_TABLETS));
+      c.tableOperations().create(tableName, ntc);
 
       // the following loop should create three tablets in each map file
       for (int i = 0; i < 3; i++) {
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
index ec03b0d..d64a52e 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
@@ -61,8 +61,8 @@
   public void merge() throws Exception {
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
-      c.tableOperations().create(tableName);
-      c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k".split(" ")));
+      var ntc = new NewTableConfiguration().withSplits(splits("a b c d e f g h i j k".split(" ")));
+      c.tableOperations().create(tableName, ntc);
       try (BatchWriter bw = c.createBatchWriter(tableName)) {
         for (String row : "a b c d e f g h i j k".split(" ")) {
           Mutation m = new Mutation(row);
@@ -80,9 +80,9 @@
   public void mergeSize() throws Exception {
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
       String tableName = getUniqueNames(1)[0];
-      c.tableOperations().create(tableName);
-      c.tableOperations().addSplits(tableName,
-          splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
+      NewTableConfiguration ntc = new NewTableConfiguration()
+          .withSplits(splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
+      c.tableOperations().create(tableName, ntc);
       try (BatchWriter bw = c.createBatchWriter(tableName)) {
         for (String row : "c e f y".split(" ")) {
           Mutation m = new Mutation(row);
@@ -167,13 +167,13 @@
     System.out.println(
         "Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);
 
-    client.tableOperations().create(table,
-        new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
-    TreeSet<Text> splitSet = new TreeSet<>();
-    for (String split : splits) {
-      splitSet.add(new Text(split));
+    SortedSet<Text> splitSet = splits(splits);
+
+    NewTableConfiguration ntc = new NewTableConfiguration().setTimeType(TimeType.LOGICAL);
+    if (!splitSet.isEmpty()) {
+      ntc = ntc.withSplits(splitSet);
     }
-    client.tableOperations().addSplits(table, splitSet);
+    client.tableOperations().create(table, ntc);
 
     HashSet<String> expected = new HashSet<>();
     try (BatchWriter bw = client.createBatchWriter(table)) {
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
index 72acbb3..1da807c 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
@@ -27,6 +27,7 @@
 
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.clientImpl.ClientContext;
 import org.apache.accumulo.core.clientImpl.MasterClient;
 import org.apache.accumulo.core.clientImpl.thrift.ThriftNotActiveServiceException;
@@ -72,10 +73,9 @@
       sleepUninterruptibly(5, TimeUnit.SECONDS);
       for (int i = 0; i < 2; i++) {
         String tableName = "table" + i;
-        log.info("Creating {}", tableName);
-        c.tableOperations().create(tableName);
-        log.info("adding splits");
-        c.tableOperations().addSplits(tableName, splits);
+        log.info("Creating {} with splits", tableName);
+        NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
+        c.tableOperations().create(tableName, ntc);
         log.info("flushing");
         c.tableOperations().flush(MetadataTable.NAME, null, null, true);
         c.tableOperations().flush(RootTable.NAME, null, null, true);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ScanRangeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ScanRangeIT.java
index cf7a6b7..aa17132 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ScanRangeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ScanRangeIT.java
@@ -25,6 +25,7 @@
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
@@ -53,12 +54,12 @@
       String table1 = tableNames[0];
       c.tableOperations().create(table1);
       String table2 = tableNames[1];
-      c.tableOperations().create(table2);
       TreeSet<Text> splitRows = new TreeSet<>();
       int splits = 3;
       for (int i = (ROW_LIMIT / splits); i < ROW_LIMIT; i += (ROW_LIMIT / splits))
         splitRows.add(createRow(i));
-      c.tableOperations().addSplits(table2, splitRows);
+      NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splitRows);
+      c.tableOperations().create(table2, ntc);
 
       insertData(c, table1);
       scanTable(c, table1);
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SummaryIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SummaryIT.java
index ed77986..ed2ae6b 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SummaryIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SummaryIT.java
@@ -590,14 +590,13 @@
   public void testBuggySummarizer() throws Exception {
     final String table = getUniqueNames(1)[0];
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
-      NewTableConfiguration ntc = new NewTableConfiguration();
       SummarizerConfiguration sc1 = SummarizerConfiguration.builder(BuggySummarizer.class).build();
-      ntc.enableSummarization(sc1);
+      // create table with a single split so that summary stats merge is forced
+      SortedSet<Text> split = new TreeSet<>(Collections.singleton(new Text("g")));
+      NewTableConfiguration ntc =
+          new NewTableConfiguration().enableSummarization(sc1).withSplits(split);
       c.tableOperations().create(table, ntc);
 
-      // add a single split so that summary stats merge is forced
-      c.tableOperations().addSplits(table, new TreeSet<>(Collections.singleton(new Text("g"))));
-
       try (BatchWriter bw = c.createBatchWriter(table)) {
         write(bw, "bar1", "f1", "q1", "v1");
         write(bw, "bar2", "f1", "q1", "v2");
@@ -886,9 +885,6 @@
   public void testManyFiles() throws Exception {
     final String table = getUniqueNames(1)[0];
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
-      NewTableConfiguration ntc = new NewTableConfiguration();
-      ntc.enableSummarization(SummarizerConfiguration.builder(FamilySummarizer.class).build());
-      c.tableOperations().create(table, ntc);
 
       Random rand = new SecureRandom();
       int q = 0;
@@ -897,7 +893,10 @@
       for (int split = 100_000; split < 1_000_000; split += 100_000) {
         partitionKeys.add(new Text(String.format("%06d", split)));
       }
-      c.tableOperations().addSplits(table, partitionKeys);
+      NewTableConfiguration ntc = new NewTableConfiguration()
+          .enableSummarization(SummarizerConfiguration.builder(FamilySummarizer.class).build())
+          .withSplits(partitionKeys);
+      c.tableOperations().create(table, ntc);
       Map<String,Long> famCounts = new HashMap<>();
 
       for (int t = 0; t < 20; t++) {