DRILL-7633: Fixes for union and repeated list accessors

Minor fixes and cleanup for the obscure union and
repeated list types in the column accessor framework.

Added variant type typeString() function
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/impl/TestResultSetLoaderUnions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/impl/TestResultSetLoaderUnions.java
index 5b161d8..7479f15 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/impl/TestResultSetLoaderUnions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/resultSet/impl/TestResultSetLoaderUnions.java
@@ -19,6 +19,7 @@
 
 import static org.apache.drill.test.rowSet.RowSetUtilities.listValue;
 import static org.apache.drill.test.rowSet.RowSetUtilities.mapValue;
+import static org.apache.drill.test.rowSet.RowSetUtilities.objArray;
 import static org.apache.drill.test.rowSet.RowSetUtilities.strArray;
 import static org.apache.drill.test.rowSet.RowSetUtilities.variantArray;
 import static org.junit.Assert.assertEquals;
@@ -75,7 +76,6 @@
  * Most operators do not support them. But, JSON uses them, so they must
  * be made to work in the result set loader layer.
  */
-
 @Category(RowSetTests.class)
 public class TestResultSetLoaderUnions extends SubOperatorTest {
 
@@ -99,7 +99,6 @@
     final RowSetLoader writer = rsLoader.writer();
 
     // Sanity check of writer structure
-
     final ObjectWriter wo = writer.column(1);
     assertEquals(ObjectType.VARIANT, wo.type());
     final VariantWriter vw = wo.variant();
@@ -111,7 +110,6 @@
     assertNotNull(vw.memberWriter(MinorType.MAP));
 
     // Write values
-
     rsLoader.startBatch();
     writer
       .addRow(1, "first")
@@ -122,7 +120,6 @@
 
     // Verify the values.
     // (Relies on the row set level union tests having passed.)
-
     final SingleRowSet expected = fixture.rowSetBuilder(schema)
       .addRow(1, "first")
       .addRow(2, mapValue(20, "fred"))
@@ -142,7 +139,6 @@
     rsLoader.startBatch();
 
     // First row, (1, "first"), create types as we go.
-
     writer.start();
     writer.addColumn(SchemaBuilder.columnSchema("id", MinorType.INT, DataMode.REQUIRED));
     writer.scalar("id").setInt(1);
@@ -152,7 +148,6 @@
     writer.save();
 
     // Second row, (2, {20, "fred"}), create types as we go.
-
     writer.start();
     writer.scalar("id").setInt(2);
     final TupleWriter innerMap = variant.member(MinorType.MAP).tuple();
@@ -164,7 +159,6 @@
 
     // Write remaining rows using convenient methods, using
     // schema defined above.
-
     writer
       .addRow(3, null)
       .addRow(4, mapValue(40, null))
@@ -172,7 +166,6 @@
 
     // Verify the values.
     // (Relies on the row set level union tests having passed.)
-
     final TupleMetadata schema = new SchemaBuilder()
         .add("id", MinorType.INT)
         .addUnion("u")
@@ -219,7 +212,6 @@
     // (the half that get strings.
     // 16 MB / 32 K = 512 bytes
     // Make a bit bigger to overflow early.
-
     final int strLength = 600;
     final byte[] value = new byte[strLength - 6];
     Arrays.fill(value, (byte) 'X');
@@ -238,25 +230,20 @@
 
     // Number of rows should be driven by vector size.
     // Our row count should include the overflow row
-
     final int expectedCount = ValueVector.MAX_BUFFER_SIZE / strLength * 2;
     assertEquals(expectedCount + 1, count);
 
     // Loader's row count should include only "visible" rows
-
     assertEquals(expectedCount, writer.rowCount());
 
     // Total count should include invisible and look-ahead rows.
-
     assertEquals(expectedCount + 1, rsLoader.totalRowCount());
 
     // Result should exclude the overflow row
-
     RowSet result = fixture.wrap(rsLoader.harvest());
     assertEquals(expectedCount, result.rowCount());
 
     // Verify the data.
-
     RowSetReader reader = result.reader();
     int readCount = 0;
     while (reader.next()) {
@@ -273,7 +260,6 @@
     result.clear();
 
     // Write a few more rows to verify the overflow row.
-
     rsLoader.startBatch();
     for (int i = 0; i < 1000; i++) {
       if (count % 2 == 0) {
@@ -310,12 +296,10 @@
    * works for lists. Here we test that the ResultSetLoader put the
    * pieces together correctly.
    */
-
   @Test
   public void testSimpleList() {
 
     // Schema with a list declared with one type, not expandable
-
     final TupleMetadata schema = new SchemaBuilder()
         .add("id", MinorType.INT)
         .addList("list")
@@ -333,7 +317,6 @@
 
     // Sanity check: should be an array of Varchar because we said the
     // types within the list is not expandable.
-
     final ArrayWriter arrWriter = writer.array("list");
     assertEquals(ObjectType.SCALAR, arrWriter.entryType());
     final ScalarWriter strWriter = arrWriter.scalar();
@@ -341,7 +324,6 @@
 
     // Can write a batch as if this was a repeated Varchar, except
     // that any value can also be null.
-
     rsLoader.startBatch();
     writer
       .addRow(1, strArray("fred", "barney"))
@@ -349,7 +331,6 @@
       .addRow(3, strArray("wilma", "betty", "pebbles"));
 
     // Verify
-
     final SingleRowSet expected = fixture.rowSetBuilder(schema)
         .addRow(1, strArray("fred", "barney"))
         .addRow(2, null)
@@ -363,7 +344,6 @@
    * Test a simple list created dynamically at load time.
    * The list must include a single type member.
    */
-
   @Test
   public void testSimpleListDynamic() {
 
@@ -372,7 +352,6 @@
 
     // Can write a batch as if this was a repeated Varchar, except
     // that any value can also be null.
-
     rsLoader.startBatch();
 
     writer.addColumn(MaterializedField.create("id", Types.required(MinorType.INT)));
@@ -384,7 +363,6 @@
 
     // Sanity check: should be an array of Varchar because we said the
     // types within the list is not expandable.
-
     final ArrayWriter arrWriter = writer.array("list");
     assertEquals(ObjectType.SCALAR, arrWriter.entryType());
     final ScalarWriter strWriter = arrWriter.scalar();
@@ -396,7 +374,6 @@
       .addRow(3, strArray("wilma", "betty", "pebbles"));
 
     // Verify
-
     final TupleMetadata schema = new SchemaBuilder()
         .add("id", MinorType.INT)
         .addList("list")
@@ -416,12 +393,10 @@
    * Try to create a simple (non-expandable) list without
    * giving a member type. Expected to fail.
    */
-
   @Test
   public void testSimpleListNoTypes() {
 
     // Schema with a list declared with one type, not expandable
-
     final TupleMetadata schema = new SchemaBuilder()
         .add("id", MinorType.INT)
         .addList("list")
@@ -440,7 +415,6 @@
    * Try to create a simple (non-expandable) list while specifying
    * two types. Expected to fail.
    */
-
   @Test
   public void testSimpleListMultiTypes() {
 
@@ -462,7 +436,6 @@
     }
   }
 
-
   /**
    * Test a variant list created dynamically at load time.
    * The list starts with no type, at which time it can hold
@@ -472,20 +445,19 @@
    * This test is superficial. There are many odd cases to consider.
    * <ul>
    * <li>Write nulls to a list with no type. (This test ensures that
-   * adding a (nullable) scalar "does the right thing.")</li>
+   * adding a (nullable) scalar "does the right thing.")</li>
    * <li>Add a map to the list. Maps carry no "bits" vector, so null
    * list entries to that point are lost. (For maps, we could go straight
    * to a union, with just a map, to preserve the null states. This whole
    * area is a huge mess...)</li>
    * <li>Do the type transitions when writing to a row. (The tests here
-   * do the transition between rows.</li>
+   * do the transition between rows.)</li>
    * </ul>
    *
    * The reason for the sparse coverage is that Drill barely supports lists
    * and unions; most code is just plain broken. Our goal here is not to fix
    * all those problems, just to leave things no more broken than before.
    */
-
   @Test
   public void testVariantListDynamic() {
 
@@ -494,7 +466,6 @@
 
     // Can write a batch as if this was a repeated Varchar, except
     // that any value can also be null.
-
     rsLoader.startBatch();
 
     writer.addColumn(MaterializedField.create("id", Types.required(MinorType.INT)));
@@ -502,7 +473,6 @@
 
     // Sanity check: should be an array of variants because we said the
     // types within the list are expandable (which is the default.)
-
     final ArrayWriter arrWriter = writer.array("list");
     assertEquals(ObjectType.VARIANT, arrWriter.entryType());
     final VariantWriter variant = arrWriter.variant();
@@ -510,24 +480,20 @@
     // We need to verify that the internal state is what we expect, so
     // the next assertion peeks inside the private bits of the union
     // writer. No client code should ever need to do this, of course.
-
     assertTrue(((UnionWriterImpl) variant).shim() instanceof EmptyListShim);
 
     // No types, so all we can do is add a null list, or a list of nulls.
-
     writer
       .addRow(1, null)
       .addRow(2, variantArray())
       .addRow(3, variantArray(null, null));
 
     // Add a String. Now we can create a list of strings and/or nulls.
-
     variant.addMember(MinorType.VARCHAR);
     assertTrue(variant.hasType(MinorType.VARCHAR));
 
     // Sanity check: sniff inside to ensure that the list contains a single
     // type.
-
     assertTrue(((UnionWriterImpl) variant).shim() instanceof SimpleListShim);
     assertTrue(((ListWriterImpl) arrWriter).vector().getDataVector() instanceof NullableVarCharVector);
 
@@ -536,11 +502,9 @@
 
     // Add an integer. The list vector should be promoted to union.
     // Now we can add both types.
-
     variant.addMember(MinorType.INT);
 
     // Sanity check: sniff inside to ensure promotion to union occurred
-
     assertTrue(((UnionWriterImpl) variant).shim() instanceof UnionVectorShim);
     assertTrue(((ListWriterImpl) arrWriter).vector().getDataVector() instanceof UnionVector);
 
@@ -548,9 +512,7 @@
       .addRow(5, variantArray("wilma", null, 30));
 
     // Verify
-
     final RowSet result = fixture.wrap(rsLoader.harvest());
-//    result.print();
 
     final TupleMetadata schema = new SchemaBuilder()
         .add("id", MinorType.INT)
@@ -571,6 +533,69 @@
   }
 
   /**
+   * Dynamically add a map to a list that also contains scalars.
+   * Assumes that {@link #testVariantListDynamic()} passed.
+   */
+  @Test
+  public void testVariantListWithMap() {
+
+    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
+    final RowSetLoader writer = rsLoader.writer();
+
+    rsLoader.startBatch();
+    writer.addColumn(MaterializedField.create("id", Types.required(MinorType.INT)));
+    writer.addColumn(MaterializedField.create("list", Types.optional(MinorType.LIST)));
+
+    final ArrayWriter arrWriter = writer.array("list");
+    final VariantWriter variant = arrWriter.variant();
+
+    // Add a null list, or a list of nulls.
+    writer
+      .addRow(1, null)
+      .addRow(2, variantArray())
+      .addRow(3, variantArray(null, null));
+
+    // Add a String. Now we can create a list of strings and/or nulls.
+    variant.addMember(MinorType.VARCHAR);
+    writer
+      .addRow(4, variantArray("fred", null, "barney"));
+
+    // Add a map
+    final TupleWriter mapWriter = variant.addMember(MinorType.MAP).tuple();
+    mapWriter.addColumn(MetadataUtils.newScalar("first", Types.optional(MinorType.VARCHAR)));
+    mapWriter.addColumn(MetadataUtils.newScalar("last", Types.optional(MinorType.VARCHAR)));
+
+    // Add a map-based record
+    writer
+      .addRow(5, variantArray(
+          mapValue("wilma", "flintstone"), mapValue("betty", "rubble")));
+
+    // Verify
+    final RowSet result = fixture.wrap(rsLoader.harvest());
+
+    final TupleMetadata schema = new SchemaBuilder()
+        .add("id", MinorType.INT)
+        .addList("list")
+          .addType(MinorType.VARCHAR)
+          .addMap()
+            .addNullable("first", MinorType.VARCHAR)
+            .addNullable("last", MinorType.VARCHAR)
+            .resumeUnion()
+          .resumeSchema()
+        .buildSchema();
+    final SingleRowSet expected = fixture.rowSetBuilder(schema)
+        .addRow(1, null)
+        .addRow(2, variantArray())
+        .addRow(3, variantArray(null, null))
+        .addRow(4, variantArray("fred", null, "barney"))
+        .addRow(5, variantArray(
+            mapValue("wilma", "flintstone"), mapValue("betty", "rubble")))
+        .build();
+
+    RowSetUtilities.verify(expected, result);
+  }
+
+  /**
    * The semantics of the ListVector are such that it allows
    * multi-dimensional lists. In this way, it is like a (slightly
    * more normalized) version of the repeated list vector. This form
@@ -582,18 +607,15 @@
    * functionality at present, so this test is more of a proof-of-
    * concept than a necessity.
    */
-
   @Test
   public void testListofListofScalar() {
 
     // JSON equivalent: {a: [[1, 2], [3, 4]]}
-
     final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
     final RowSetLoader writer = rsLoader.writer();
 
     // Can write a batch as if this was a repeated Varchar, except
     // that any value can also be null.
-
     rsLoader.startBatch();
 
     writer.addColumn(MaterializedField.create("a", Types.optional(MinorType.LIST)));
@@ -608,7 +630,6 @@
     final RowSet results = fixture.wrap(rsLoader.harvest());
 
     // Verify metadata
-
     final ListVector outer = (ListVector) results.container().getValueVector(0).getValueVector();
     final MajorType outerType = outer.getField().getType();
     assertEquals(1, outerType.getSubTypeCount());
@@ -630,7 +651,6 @@
 
     // Note use of TupleMetadata: BatchSchema can't hold the
     // structure of a list.
-
     final TupleMetadata expectedSchema = new SchemaBuilder()
         .addList("a")
           .addList()
@@ -644,7 +664,99 @@
     RowSetUtilities.verify(expected, results);
   }
 
-  // TODO: Simple list of map
-  // TODO: Regular list of map
-  // TODO: Regular list of union
+  /**
+   * The repeated list type is way off in the weeds in Drill. It is not fully
+   * supported and the semantics are very murky as a result. It is not clear
+   * how such a structure fits into SQL or into an xDBC client. Still, it is
+   * part of Drill at present and must be supported in the result set loader.
+   * <p>
+   * This test models using the repeated list as a 2D array of UNION.
+   */
+  @Test
+  public void testRepeatedListOfUnion() {
+
+    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
+    final RowSetLoader writer = rsLoader.writer();
+
+    // Can write a batch as if this was a repeated Varchar, except
+    // that any value can also be null.
+    rsLoader.startBatch();
+
+    writer.addColumn(MaterializedField.create("id", Types.required(MinorType.INT)));
+
+    // A union requires a structured column. The only tool to build that at present
+    // is the schema builder, so we use that and grab a single column.
+    final TupleMetadata dummySchema = new SchemaBuilder()
+        .addRepeatedList("list")
+          .addArray(MinorType.UNION)
+          .resumeSchema()
+        .buildSchema();
+    writer.addColumn(dummySchema.metadata(0));
+
+    // Sanity check: should be an array of array of variants.
+    final ArrayWriter outerArrWriter  = writer.array("list");
+    assertEquals(ObjectType.ARRAY, outerArrWriter.entryType());
+    final ArrayWriter innerArrWriter = outerArrWriter.array();
+    assertEquals(ObjectType.VARIANT, innerArrWriter.entryType());
+    final VariantWriter variant = innerArrWriter.variant();
+
+    // No types, so all we can do is add a null list, or a list of nulls.
+    writer
+      .addRow(1, null)
+      .addRow(2, objArray())
+      .addRow(3, objArray(null, null))
+      .addRow(4, objArray(variantArray(), variantArray()))
+      .addRow(5, objArray(variantArray(null, null), variantArray(null, null)));
+
+    // Add a String. Now we can create a list of strings and/or nulls.
+    variant.addMember(MinorType.VARCHAR);
+    assertTrue(variant.hasType(MinorType.VARCHAR));
+
+    writer
+      .addRow(6, objArray(
+          variantArray("fred", "wilma", null),
+          variantArray("barney", "betty", null)));
+
+    // Add a map
+    final TupleWriter mapWriter = variant.addMember(MinorType.MAP).tuple();
+    mapWriter.addColumn(MetadataUtils.newScalar("first", Types.optional(MinorType.VARCHAR)));
+    mapWriter.addColumn(MetadataUtils.newScalar("last", Types.optional(MinorType.VARCHAR)));
+
+    // Add a map-based record
+    writer
+      .addRow(7, objArray(
+          variantArray(mapValue("fred", "flintstone"), mapValue("wilma", "flintstone")),
+          variantArray(mapValue("barney", "rubble"), mapValue("betty", "rubble"))));
+
+    // Verify
+    final RowSet result = fixture.wrap(rsLoader.harvest());
+
+    final TupleMetadata schema = new SchemaBuilder()
+        .add("id", MinorType.INT)
+        .addRepeatedList("list")
+          .addList()
+            .addType(MinorType.VARCHAR)
+            .addMap()
+              .addNullable("first", MinorType.VARCHAR)
+              .addNullable("last", MinorType.VARCHAR)
+              .resumeUnion()
+            .resumeRepeatedList()
+          .resumeSchema()
+        .buildSchema();
+    final SingleRowSet expected = fixture.rowSetBuilder(schema)
+        .addRow(1, null)
+        .addRow(2, objArray())
+        .addRow(3, objArray(null, null))
+        .addRow(4, objArray(variantArray(), variantArray()))
+        .addRow(5, objArray(variantArray(null, null), variantArray(null, null)))
+        .addRow(6, objArray(
+            variantArray("fred", "wilma", null),
+            variantArray("barney", "betty", null)))
+        .addRow(7, objArray(
+            variantArray(mapValue("fred", "flintstone"), mapValue("wilma", "flintstone")),
+            variantArray(mapValue("barney", "rubble"), mapValue("betty", "rubble"))))
+        .build();
+
+    RowSetUtilities.verify(expected, result);
+  }
 }
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/rowSet/TestSchemaBuilder.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/rowSet/TestSchemaBuilder.java
index 880a316..e754db5 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/rowSet/TestSchemaBuilder.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/rowSet/TestSchemaBuilder.java
@@ -222,7 +222,6 @@
     // optional list has one set of semantics (in ListVector, not
     // really supported), while a repeated list has entirely different
     // semantics (in the RepeatedListVector) and is supported.
-
     assertEquals(DataMode.OPTIONAL, list.mode());
 
     VariantMetadata variant = list.variantSchema();
@@ -390,7 +389,6 @@
         .buildSchema();
 
     // Use name methods, just for variety
-
     ColumnMetadata a = schema.metadata("a");
     assertEquals(MinorType.VARDECIMAL, a.type());
     assertEquals(DataMode.OPTIONAL, a.mode());
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java
index 8e287e4..f084091 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/RowSetComparison.java
@@ -20,6 +20,7 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.math.BigDecimal;
 import java.math.MathContext;
@@ -76,6 +77,7 @@
    * to construct BigDecimals of the desired precision.
    */
   private MathContext scale = new MathContext(3);
+
   /**
   * Floats and doubles do not compare exactly. This delta is used
   * by JUnit for such comparisons. This is not a general solution;
@@ -163,10 +165,13 @@
   }
 
   private void compareSchemasAndCounts(RowSet actual) {
-    assertTrue("Schemas don't match.\n" +
-      "Expected: " + expected.schema().toString() +
-      "\nActual:   " + actual.schema(),
-      expected.schema().isEquivalent(actual.schema()));
+    if (!expected.schema().isEquivalent(actual.schema())) {
+      // Avoid building the error string on every comparison,
+      // only build on failures.
+      fail("Schemas don't match.\n" +
+        "Expected: " + expected.schema().toString() +
+        "\nActual:   " + actual.schema().toString());
+    }
     int testLength = getTestLength();
     int dataLength = offset + testLength;
     assertTrue("Missing expected rows", expected.rowCount() >= dataLength);
@@ -204,7 +209,7 @@
   }
 
   /**
-   * Convenience method to verify the actual results, then free memory
+   * Verifies the actual results, then frees memory
    * for both the expected and actual result sets.
    * @param actual the actual results to verify
    */
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/VariantColumnMetadata.java b/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/VariantColumnMetadata.java
index c1ee98e..b54d50f 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/VariantColumnMetadata.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/VariantColumnMetadata.java
@@ -145,9 +145,7 @@
 
   @Override
   public ColumnMetadata copy() {
-    // TODO Auto-generated method stub
-    assert false;
-    return null;
+    return new VariantColumnMetadata(name, type, mode, variantSchema.copy());
   }
 
   @Override
@@ -156,6 +154,19 @@
   }
 
   @Override
+  public String typeString() {
+    StringBuilder builder = new StringBuilder();
+    if (isArray()) {
+      builder.append("ARRAY<");
+    }
+    builder.append("UNION");
+    if (isArray()) {
+      builder.append(">");
+    }
+    return builder.toString();
+  }
+
+  @Override
   public MaterializedField schema() {
     return MaterializedField.create(name,
         MajorType.newBuilder()
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/VariantSchema.java b/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/VariantSchema.java
index 8261419..65d6fdb 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/VariantSchema.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/record/metadata/VariantSchema.java
@@ -206,4 +206,13 @@
     copy.isSimple = isSimple;
     return copy;
   }
-}
\ No newline at end of file
+
+  public VariantSchema copy() {
+    VariantSchema copy = new VariantSchema();
+    copy.isSimple = isSimple;
+    for (ColumnMetadata type : types.values()) {
+      copy.addType(type);
+    }
+    return copy;
+  }
+}
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/VariantWriter.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/VariantWriter.java
index 705ed83..25cade7 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/VariantWriter.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/VariantWriter.java
@@ -54,7 +54,6 @@
  *
  * @see {@link VariantReader}
  */
-
 public interface VariantWriter extends ColumnWriter {
 
   interface VariantWriterListener {
@@ -69,7 +68,6 @@
    *
    * @return metadata for the variant
    */
-
   VariantMetadata variantSchema();
 
   /**
@@ -78,7 +76,6 @@
    *
    * @return number of types in the variant
    */
-
   int size();
 
   /**
@@ -86,11 +83,10 @@
    * given type. (The storage will be created as needed during writing.)
    *
    * @param type data type
-   * @return <tt>true</tt> if a value of the given type has been written
+   * @return {@code true} if a value of the given type has been written
    * and storage allocated (or storage was allocated implicitly),
-   * <tt>false</tt> otherwise
+   * {@code false} otherwise
    */
-
   boolean hasType(MinorType type);
 
   ObjectWriter addMember(MinorType type);
@@ -107,7 +103,6 @@
    * @return the writer for that type without setting the type of the
    * current row.
    */
-
   ObjectWriter memberWriter(MinorType type);
 
   /**
@@ -116,7 +111,6 @@
    *
    * @param type type to set for the current row
    */
-
   void setType(MinorType type);
 
   /**
@@ -128,7 +122,6 @@
    * @param type type to set for the current row
    * @return writer for the type just set
    */
-
   ObjectWriter member(MinorType type);
   ScalarWriter scalar(MinorType type);
   TupleWriter tuple();
diff --git a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionVectorShim.java b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionVectorShim.java
index d18b382..f9d0996 100644
--- a/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionVectorShim.java
+++ b/exec/vector/src/main/java/org/apache/drill/exec/vector/accessor/writer/UnionVectorShim.java
@@ -37,7 +37,6 @@
  * list itself evolves from no type, to a single type and to
  * a union.
  */
-
 public class UnionVectorShim implements UnionShim {
 
   static class DefaultListener implements VariantWriterListener {
@@ -56,7 +55,6 @@
       // which will create the member metadata. This means that the type
       // will already be in the variant schema by the time we add the
       // writer to the variant writer in a few steps from now.
-
       final ValueVector memberVector = shim.vector.getMember(type);
       final ColumnMetadata memberSchema = shim.writer.variantSchema().addType(type);
       return ColumnWriterFactory.buildColumnWriter(memberSchema, memberVector);
@@ -77,7 +75,6 @@
    * says which union member holds the value for each row. The type vector
    * can also indicate if the value is null.
    */
-
   private final BaseScalarWriter typeWriter;
 
   public UnionVectorShim(UnionVector vector) {
@@ -118,7 +115,6 @@
 
   // Unions are complex: the listener should bind to the individual components
   // as they are created.
-
   @Override
   public void bindListener(ColumnWriterListener listener) { }
 
@@ -128,7 +124,6 @@
     // Not really necessary: the default value is 0.
     // This lets a caller change its mind after setting a
     // value.
-
     typeWriter.setInt(UnionVector.NULL_MARKER);
   }
 
@@ -175,7 +170,6 @@
    *
    * @param colWriter the column writer to add
    */
-
   @Override
   public void addMember(AbstractObjectWriter colWriter) {
     addMemberWriter(colWriter);
@@ -189,11 +183,10 @@
    * and provides this shim with the writer from the single-list shim.
    * In the latter case, the writer is already initialized and is already
    * part of the metadata for this list; so we don't want to call the
-   * list's <tt>addMember()</tt> and repeat those operations.
+   * list's {@code addMember()} and repeat those operations.
    *
    * @param colWriter the column (type) writer to add
    */
-
   public void addMemberWriter(AbstractObjectWriter colWriter) {
     final MinorType type = colWriter.schema().type();
     assert variants[type.ordinal()] == null;
@@ -288,7 +281,6 @@
    *
    * @return the writer for the types vector
    */
-
   public AbstractScalarWriterImpl typeWriter() { return typeWriter; }
 
   @Override
@@ -306,7 +298,6 @@
    * far. Tell the type writer that those positions have been
    * written so that they are not zero-filled.
    */
-
   public void initTypeIndex(int typeFillCount) {
     ((BaseFixedWidthWriter) typeWriter).setLastWriteIndex(typeFillCount);
   }