DRILL-7530: Fix class names in loggers

1. Fix loggers that were initialized with the wrong class (mostly copy-paste leftovers), so log output is attributed to the class that actually emits it.
2. Minor code cleanup: rename SpilledRecordbatch to SpilledRecordBatch, unify logger field naming (log/LOG -> logger), remove unused loggers and imports, and drop throws clauses for exceptions that are never thrown.

closes #1957
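
For reference, the logger idiom applied throughout (a minimal sketch; the class name below is illustrative, not one of the touched files):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ExampleOperator {
      // Logger keyed to the declaring class, so log lines are attributed
      // to the class that actually emitted them.
      private static final Logger logger = LoggerFactory.getLogger(ExampleOperator.class);

      public void run() {
        logger.debug("running {}", getClass().getSimpleName());
      }
    }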
diff --git a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBTableCache.java b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBTableCache.java
index df8d59d..281fd6d 100644
--- a/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBTableCache.java
+++ b/contrib/format-maprdb/src/main/java/org/apache/drill/exec/store/mapr/db/MapRDBTableCache.java
@@ -34,9 +34,12 @@
 import org.apache.drill.shaded.guava.com.google.common.cache.LoadingCache;
 import org.apache.drill.shaded.guava.com.google.common.cache.RemovalListener;
 import org.apache.drill.shaded.guava.com.google.common.cache.RemovalNotification;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class MapRDBTableCache {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(MapRDBFormatPlugin.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(MapRDBTableCache.class);
 
   public static final String FORMAT_MAPRDB_JSON_TABLE_CACHE_ENABLED = "format-maprdb.json.tableCache.enabled";
 
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
index 5cec0fb..005b6e7 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/HiveFuncHolderExpr.java
@@ -29,8 +29,8 @@
 import org.apache.drill.exec.expr.fn.HiveFuncHolder;
 
 public class HiveFuncHolderExpr extends FunctionHolderExpression implements Iterable<LogicalExpression>{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillFuncHolderExpr.class);
-  private HiveFuncHolder holder;
+
+  private final HiveFuncHolder holder;
 
   public HiveFuncHolderExpr(String nameUsed, HiveFuncHolder holder, List<LogicalExpression> args, ExpressionPosition pos) {
     super(nameUsed, pos, args);
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
index 39dec58..192662e 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/expr/fn/HiveFuncHolder.java
@@ -49,7 +49,6 @@
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
 
 public class HiveFuncHolder extends AbstractFuncHolder {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(FunctionImplementationRegistry.class);
 
   private MajorType[] argTypes;
   private ObjectInspector returnOI;
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveMetadataProvider.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveMetadataProvider.java
index f6ee511..4a2bb58 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveMetadataProvider.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/HiveMetadataProvider.java
@@ -41,6 +41,8 @@
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
@@ -63,7 +65,8 @@
  * loaded, InputSplits are cached to speedup subsequent access.
  */
 public class HiveMetadataProvider {
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveMetadataProvider.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(HiveMetadataProvider.class);
 
   public static final int RECORD_SIZE = 1024;
 
@@ -393,7 +396,7 @@
    */
   public static class HiveStats {
 
-    private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HiveStats.class);
+    private static final Logger logger = LoggerFactory.getLogger(HiveStats.class);
 
     private long numRows;
     private long sizeInBytes;
diff --git a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/client/TableEntryCacheLoader.java b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/client/TableEntryCacheLoader.java
index 2d32faf..1a19ff9 100644
--- a/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/client/TableEntryCacheLoader.java
+++ b/contrib/storage-hive/core/src/main/java/org/apache/drill/exec/store/hive/client/TableEntryCacheLoader.java
@@ -42,7 +42,7 @@
  */
 final class TableEntryCacheLoader extends CacheLoader<TableName, HiveReadEntry> {
 
-  private static final Logger logger = LoggerFactory.getLogger(TableNameCacheLoader.class);
+  private static final Logger logger = LoggerFactory.getLogger(TableEntryCacheLoader.class);
 
   private final DrillHiveMetaStoreClient client;
 
diff --git a/contrib/storage-kafka/src/test/java/org/apache/drill/exec/store/kafka/TestKafkaSuit.java b/contrib/storage-kafka/src/test/java/org/apache/drill/exec/store/kafka/TestKafkaSuit.java
index c996c38..0d61fd7 100644
--- a/contrib/storage-kafka/src/test/java/org/apache/drill/exec/store/kafka/TestKafkaSuit.java
+++ b/contrib/storage-kafka/src/test/java/org/apache/drill/exec/store/kafka/TestKafkaSuit.java
@@ -52,7 +52,8 @@
 @RunWith(Suite.class)
 @SuiteClasses({KafkaQueriesTest.class, MessageIteratorTest.class, MessageReaderFactoryTest.class, KafkaFilterPushdownTest.class})
 public class TestKafkaSuit extends BaseTest {
-  private static final Logger logger = LoggerFactory.getLogger(LoggerFactory.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(TestKafkaSuit.class);
 
   private static final String LOGIN_CONF_RESOURCE_PATHNAME = "login.conf";
 
diff --git a/contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBRecordReader.java b/contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBRecordReader.java
index 6788dac..00d5100 100644
--- a/contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBRecordReader.java
+++ b/contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBRecordReader.java
@@ -20,7 +20,6 @@
 import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableList;
 import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableMap;
 import org.apache.drill.common.exceptions.DrillRuntimeException;
-import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.common.expression.SchemaPath;
 import org.apache.drill.common.types.TypeProtos;
@@ -45,7 +44,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Iterator;
 import java.util.List;
@@ -58,7 +56,7 @@
 
 public class OpenTSDBRecordReader extends AbstractRecordReader {
 
-  private static final Logger log = LoggerFactory.getLogger(OpenTSDBRecordReader.class);
+  private static final Logger logger = LoggerFactory.getLogger(OpenTSDBRecordReader.class);
 
   // batch size should not exceed max allowed record count
   private static final int TARGET_RECORD_COUNT = 4000;
@@ -74,23 +72,23 @@
   private Map<String, String> params;
 
   public OpenTSDBRecordReader(Service client, OpenTSDBSubScan.OpenTSDBSubScanSpec subScanSpec,
-                       List<SchemaPath> projectedColumns) throws IOException {
+                       List<SchemaPath> projectedColumns) {
     setColumns(projectedColumns);
     this.db = client;
     this.params =
             fromRowData(subScanSpec.getTableName());
-    log.debug("Scan spec: {}", subScanSpec);
+    logger.debug("Scan spec: {}", subScanSpec);
   }
 
   @Override
-  public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
+  public void setup(OperatorContext context, OutputMutator output) {
     this.output = output;
     Set<MetricDTO> metrics =
             db.getAllMetrics(params);
     if (metrics == null) {
       throw UserException.validationError()
               .message(String.format("Table '%s' not found", params.get(METRIC_PARAM)))
-              .build(log);
+              .build(logger);
     }
     this.tableIterator = metrics.iterator();
   }
@@ -105,7 +103,7 @@
   }
 
   @Override
-  public void close() throws Exception {
+  public void close() {
   }
 
   static {
@@ -210,7 +208,7 @@
                         + "The column's name was %s and its OpenTSDB data type was %s. ", name, type.toString());
         throw UserException.unsupportedError()
                 .message(message)
-                .build(log);
+                .build(logger);
       }
 
       ProjectedColumnInfo pci = getProjectedColumnInfo(column, name, minorType);
diff --git a/contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBStoragePluginConfig.java b/contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBStoragePluginConfig.java
index 1b67c1d..e207886 100644
--- a/contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBStoragePluginConfig.java
+++ b/contrib/storage-opentsdb/src/main/java/org/apache/drill/exec/store/openTSDB/OpenTSDBStoragePluginConfig.java
@@ -25,24 +25,23 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
 import java.util.Objects;
 
 @JsonTypeName(OpenTSDBStoragePluginConfig.NAME)
 public class OpenTSDBStoragePluginConfig extends StoragePluginConfigBase {
 
-  private static final Logger log = LoggerFactory.getLogger(OpenTSDBStoragePluginConfig.class);
+  private static final Logger logger = LoggerFactory.getLogger(OpenTSDBStoragePluginConfig.class);
 
   public static final String NAME = "openTSDB";
 
   private final String connection;
 
   @JsonCreator
-  public OpenTSDBStoragePluginConfig(@JsonProperty("connection") String connection) throws IOException {
+  public OpenTSDBStoragePluginConfig(@JsonProperty("connection") String connection) {
     if (connection == null || connection.isEmpty()) {
       throw UserException.validationError()
               .message("Connection property must not be null. Check plugin configuration.")
-              .build(log);
+              .build(logger);
     }
     this.connection = connection;
   }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/MathFunctions.java b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/MathFunctions.java
index dd5b359..5e4615b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/MathFunctions.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/MathFunctions.java
@@ -32,10 +32,7 @@
 import org.apache.drill.exec.expr.holders.NullableVarCharHolder;
 import org.apache.drill.exec.expr.holders.VarCharHolder;
 
-public class MathFunctions{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(MathFunctions.class);
-
-  private MathFunctions(){}
+public class MathFunctions {
 
   @FunctionTemplate(name = "power", scope = FunctionScope.SIMPLE, nulls = NullHandling.NULL_IF_NULL)
   public static class Power implements DrillSimpleFunc{
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnionAll.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnionAll.java
index df5d328..4593737 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnionAll.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/config/UnionAll.java
@@ -32,8 +32,6 @@
 
 public class UnionAll extends AbstractMultiple {
 
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(Filter.class);
-
   @JsonCreator
   public UnionAll(@JsonProperty("children") List<PhysicalOperator> children) {
     super(children);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScreenCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScreenCreator.java
index b778937..625bfb3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScreenCreator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/ScreenCreator.java
@@ -19,7 +19,6 @@
 
 import java.util.List;
 
-import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.exec.exception.OutOfMemoryException;
 import org.apache.drill.exec.ops.AccountingUserConnection;
 import org.apache.drill.exec.ops.ExecutorFragmentContext;
@@ -37,20 +36,21 @@
 import org.apache.drill.exec.testing.ControlsInjectorFactory;
 
 import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ScreenCreator implements RootCreator<Screen> {
   private static final ControlsInjector injector = ControlsInjectorFactory.getInjector(ScreenCreator.class);
 
   @Override
-  public RootExec getRoot(ExecutorFragmentContext context, Screen config, List<RecordBatch> children)
-      throws ExecutionSetupException {
+  public RootExec getRoot(ExecutorFragmentContext context, Screen config, List<RecordBatch> children) {
     Preconditions.checkNotNull(children);
     Preconditions.checkArgument(children.size() == 1);
     return new ScreenRoot(context, children.iterator().next(), config);
   }
 
   public static class ScreenRoot extends BaseRootExec {
-    private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ScreenRoot.class);
+    private static final Logger logger = LoggerFactory.getLogger(ScreenRoot.class);
     private final RecordBatch incoming;
     private final RootFragmentContext context;
     private final AccountingUserConnection userConnection;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/SingleSenderCreator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/SingleSenderCreator.java
index c23ac44..d14d09a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/SingleSenderCreator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/SingleSenderCreator.java
@@ -19,7 +19,6 @@
 
 import java.util.List;
 
-import org.apache.drill.common.exceptions.ExecutionSetupException;
 import org.apache.drill.exec.exception.OutOfMemoryException;
 import org.apache.drill.exec.ops.AccountingDataTunnel;
 import org.apache.drill.exec.ops.ExecutorFragmentContext;
@@ -33,18 +32,19 @@
 import org.apache.drill.exec.record.RecordBatch.IterOutcome;
 import org.apache.drill.exec.testing.ControlsInjector;
 import org.apache.drill.exec.testing.ControlsInjectorFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class SingleSenderCreator implements RootCreator<SingleSender>{
 
   @Override
-  public RootExec getRoot(ExecutorFragmentContext context, SingleSender config, List<RecordBatch> children)
-      throws ExecutionSetupException {
+  public RootExec getRoot(ExecutorFragmentContext context, SingleSender config, List<RecordBatch> children) {
     assert children != null && children.size() == 1;
     return new SingleSenderRootExec(context, children.iterator().next(), config);
   }
 
   public static class SingleSenderRootExec extends BaseRootExec {
-    private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(SingleSenderRootExec.class);
+    private static final Logger logger = LoggerFactory.getLogger(SingleSenderRootExec.class);
     private static final ControlsInjector injector = ControlsInjectorFactory.getInjector(SingleSenderRootExec.class);
 
     private final FragmentHandle oppositeHandle;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
index 3185899..3f13a5a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/HashAggTemplate.java
@@ -81,7 +81,8 @@
 import org.slf4j.LoggerFactory;
 
 public abstract class HashAggTemplate implements HashAggregator {
-  protected static final Logger logger = LoggerFactory.getLogger(HashAggregator.class);
+
+  protected static final Logger logger = LoggerFactory.getLogger(HashAggTemplate.class);
 
   private static final int VARIABLE_MAX_WIDTH_VALUE_SIZE = 50;
   private static final int VARIABLE_MIN_WIDTH_VALUE_SIZE = 8;
@@ -143,7 +144,7 @@
   private HashAggUpdater updater;
   private final SpilledState<HashAggSpilledPartition> spilledState = new SpilledState<>();
   private SpillSet spillSet;
-  SpilledRecordbatch newIncoming; // when reading a spilled file - work like an "incoming"
+  SpilledRecordBatch newIncoming; // when reading a spilled file - work like an "incoming"
   private Writer writers[]; // a vector writer for each spilled partition
   private int spilledBatchesCount[]; // count number of batches spilled, in each partition
   private String spillFiles[];
@@ -1146,7 +1147,7 @@
         // pick a spilled partition; set a new incoming ...
         HashAggSpilledPartition sp = spilledState.getNextSpilledPartition();
         // Create a new "incoming" out of the spilled partition spill file
-        newIncoming = new SpilledRecordbatch(sp.getSpillFile(), sp.getSpilledBatches(), context, schema, oContext, spillSet);
+        newIncoming = new SpilledRecordBatch(sp.getSpillFile(), sp.getSpilledBatches(), context, schema, oContext, spillSet);
         originalPartition = sp.getOriginPartition(); // used for the filename
         logger.trace("Reading back spilled original partition {} as an incoming",originalPartition);
         // Initialize .... new incoming, new set of partitions
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/SpilledRecordbatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/SpilledRecordBatch.java
similarity index 91%
rename from exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/SpilledRecordbatch.java
rename to exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/SpilledRecordBatch.java
index 56adae2..33cad10 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/SpilledRecordbatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/SpilledRecordBatch.java
@@ -25,13 +25,14 @@
 import org.apache.drill.exec.physical.impl.spill.SpillSet;
 import org.apache.drill.exec.record.BatchSchema;
 import org.apache.drill.exec.record.CloseableRecordBatch;
-import org.apache.drill.exec.record.SimpleRecordBatch;
 import org.apache.drill.exec.record.TypedFieldId;
 import org.apache.drill.exec.record.VectorContainer;
 import org.apache.drill.exec.record.VectorWrapper;
 import org.apache.drill.exec.record.WritableBatch;
 import org.apache.drill.exec.record.selection.SelectionVector2;
 import org.apache.drill.exec.record.selection.SelectionVector4;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -40,9 +41,9 @@
 /**
  * A class to replace "incoming" - instead scanning a spilled partition file
  */
-public class SpilledRecordbatch implements CloseableRecordBatch {
+public class SpilledRecordBatch implements CloseableRecordBatch {
 
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(SimpleRecordBatch.class);
+  private static final Logger logger = LoggerFactory.getLogger(SpilledRecordBatch.class);
 
   private VectorContainer container;
   private InputStream spillStream;
@@ -57,7 +58,7 @@
   // during the method's execution a value IterOutcome.STOP will be assigned.
   private IterOutcome lastOutcome;
 
-  public SpilledRecordbatch(String spillFile, int spilledBatches, FragmentContext context, BatchSchema schema, OperatorContext oContext, SpillSet spillSet) {
+  public SpilledRecordBatch(String spillFile, int spilledBatches, FragmentContext context, BatchSchema schema, OperatorContext oContext, SpillSet spillSet) {
     this.context = context;
     this.schema = schema;
     this.spilledBatches = spilledBatches;
@@ -69,7 +70,7 @@
     try {
       this.spillStream = this.spillSet.openForInput(spillFile);
     } catch (IOException e) {
-      throw UserException.resourceError(e).build(HashAggBatch.logger);
+      throw UserException.resourceError(e).build(logger);
     }
 
     initialOutcome = next(); // initialize the container
@@ -148,7 +149,7 @@
     }
 
     if ( spillSet.getPosition(spillStream)  < 0 ) {
-      HashAggTemplate.logger.warn("Position is {} for stream {}", spillSet.getPosition(spillStream), spillStream.toString());
+      logger.warn("Position is {} for stream {}", spillSet.getPosition(spillStream), spillStream.toString());
     }
 
     try {
@@ -163,7 +164,7 @@
       }
     } catch (IOException e) {
       lastOutcome = IterOutcome.STOP;
-      throw UserException.dataReadError(e).addContext("Failed reading from a spill file").build(HashAggTemplate.logger);
+      throw UserException.dataReadError(e).addContext("Failed reading from a spill file").build(logger);
     } catch (Exception e) {
       lastOutcome = IterOutcome.STOP;
       throw e;
@@ -206,7 +207,6 @@
     }
     catch (IOException e) {
       /* ignore */
-    } finally {
     }
   }
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggTemplate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggTemplate.java
index 1ada0ac..e2f431b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggTemplate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/aggregate/StreamingAggTemplate.java
@@ -34,7 +34,7 @@
 import org.slf4j.LoggerFactory;
 
 public abstract class StreamingAggTemplate implements StreamingAggregator {
-  private static final Logger logger = LoggerFactory.getLogger(StreamingAggregator.class);
+  private static final Logger logger = LoggerFactory.getLogger(StreamingAggTemplate.class);
   private static final boolean EXTRA_DEBUG = false;
   private int maxOutputRows = ValueVector.MAX_ROW_COUNT;
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/common/HashTableTemplate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/common/HashTableTemplate.java
index b549a9e..c93de9e 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/common/HashTableTemplate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/common/HashTableTemplate.java
@@ -48,12 +48,14 @@
 import org.apache.drill.exec.vector.ValueVector;
 import org.apache.drill.common.exceptions.RetryAfterSpillException;
 import org.apache.drill.exec.vector.VariableWidthVector;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public abstract class HashTableTemplate implements HashTable {
 
   public static final int MAX_VARCHAR_SIZE = 8; // This is a bad heuristic which will be eliminated when the keys are removed from the HashTable.
 
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HashTable.class);
+  private static final Logger logger = LoggerFactory.getLogger(HashTableTemplate.class);
   private static final boolean EXTRA_DEBUG = false;
 
   private static final int EMPTY_SLOT = -1;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java
index 6977e18..5edea2c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinBatch.java
@@ -59,7 +59,7 @@
 import org.apache.drill.exec.ops.MetricDef;
 import org.apache.drill.exec.physical.base.AbstractBase;
 import org.apache.drill.exec.physical.config.HashJoinPOP;
-import org.apache.drill.exec.physical.impl.aggregate.SpilledRecordbatch;
+import org.apache.drill.exec.physical.impl.aggregate.SpilledRecordBatch;
 import org.apache.drill.exec.physical.impl.common.AbstractSpilledPartitionMetadata;
 import org.apache.drill.exec.physical.impl.common.ChainedHashTable;
 import org.apache.drill.exec.physical.impl.common.Comparator;
@@ -638,15 +638,15 @@
             }
 
             // Create a BUILD-side "incoming" out of the inner spill file of that partition
-            buildBatch = new SpilledRecordbatch(currSp.innerSpillFile, currSp.innerSpilledBatches, context, buildSchema, oContext, spillSet);
+            buildBatch = new SpilledRecordBatch(currSp.innerSpillFile, currSp.innerSpilledBatches, context, buildSchema, oContext, spillSet);
             // The above ctor call also got the first batch; need to update the outcome
-            rightUpstream = ((SpilledRecordbatch) buildBatch).getInitialOutcome();
+            rightUpstream = ((SpilledRecordBatch) buildBatch).getInitialOutcome();
 
             if (currSp.outerSpilledBatches > 0) {
               // Create a PROBE-side "incoming" out of the outer spill file of that partition
-              probeBatch = new SpilledRecordbatch(currSp.outerSpillFile, currSp.outerSpilledBatches, context, probeSchema, oContext, spillSet);
+              probeBatch = new SpilledRecordBatch(currSp.outerSpillFile, currSp.outerSpilledBatches, context, probeSchema, oContext, spillSet);
               // The above ctor call also got the first batch; need to update the outcome
-              leftUpstream = ((SpilledRecordbatch) probeBatch).getInitialOutcome();
+              leftUpstream = ((SpilledRecordBatch) probeBatch).getInitialOutcome();
             } else {
               probeBatch = left; // if no outer batch then reuse left - needed for updateIncoming()
               leftUpstream = IterOutcome.NONE;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinMemoryCalculator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinMemoryCalculator.java
index c262e3c..eddb398 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinMemoryCalculator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinMemoryCalculator.java
@@ -21,8 +21,6 @@
 import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
 import org.apache.drill.exec.record.RecordBatch;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import java.util.List;
 import java.util.Set;
@@ -158,7 +156,7 @@
    * This class represents the memory size statistics for an entire set of partitions.
    */
   class PartitionStatSet {
-    private static final Logger log = LoggerFactory.getLogger(PartitionStatSet.class);
+
     private final PartitionStat[] partitionStats;
 
     public PartitionStatSet(final PartitionStat... partitionStats) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinMemoryCalculatorImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinMemoryCalculatorImpl.java
index 88f3ddc..368bff6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinMemoryCalculatorImpl.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/join/HashJoinMemoryCalculatorImpl.java
@@ -28,12 +28,10 @@
 
 import java.util.Map;
 import java.util.Set;
-import javax.annotation.Nullable;
 
 import static org.apache.drill.exec.physical.impl.join.HashJoinState.INITIALIZING;
 
 public class HashJoinMemoryCalculatorImpl implements HashJoinMemoryCalculator {
-  private static final Logger log = LoggerFactory.getLogger(HashJoinMemoryCalculatorImpl.class);
 
   private final double safetyFactor;
   private final double fragmentationFactor;
@@ -144,7 +142,6 @@
       return "No debugging for " + NoopBuildSidePartitioningImpl.class.getCanonicalName();
     }
 
-    @Nullable
     @Override
     public PostBuildCalculations next() {
       return new NoopPostBuildCalculationsImpl(recordsPerPartitionBatchProbe);
@@ -180,7 +177,7 @@
    * </p>
    */
   public static class BuildSidePartitioningImpl implements BuildSidePartitioning {
-    public static final Logger log = LoggerFactory.getLogger(BuildSidePartitioning.class);
+    private static final Logger logger = LoggerFactory.getLogger(BuildSidePartitioningImpl.class);
 
     private final BatchSizePredictor.Factory batchSizePredictorFactory;
     private final HashTableSizeCalculator hashTableSizeCalculator;
@@ -322,7 +319,7 @@
 
       calculateMemoryUsage();
 
-      log.debug("Creating {} partitions when {} initial partitions configured.", partitions, initialPartitions);
+      logger.debug("Creating {} partitions when {} initial partitions configured.", partitions, initialPartitions);
     }
 
     @Override
@@ -439,7 +436,7 @@
         }
 
         message = phase + message;
-        log.warn(message);
+        logger.warn(message);
       }
     }
 
@@ -527,7 +524,6 @@
       return false;
     }
 
-    @Nullable
     @Override
     public HashJoinMemoryCalculator next() {
       return null;
@@ -565,7 +561,8 @@
    * </p>
    */
   public static class PostBuildCalculationsImpl implements PostBuildCalculations {
-    private static final Logger log = LoggerFactory.getLogger(PostBuildCalculationsImpl.class);
+
+    private static final Logger logger = LoggerFactory.getLogger(PostBuildCalculationsImpl.class);
 
     public static final int MIN_RECORDS_PER_PARTITION_BATCH_PROBE = 10;
 
@@ -703,7 +700,7 @@
 
       if (memoryForPartitionBatches < 0) {
         // We just don't have enough memory. We should do our best though by using the minimum batch size.
-        log.warn("Not enough memory for probing:\n" +
+        logger.warn("Not enough memory for probing:\n" +
           "Memory available: {}\n" +
           "Max probe batch size: {}\n" +
           "Max output batch size: {}",
@@ -772,7 +769,6 @@
       return consumedMemory > memoryAvailable;
     }
 
-    @Nullable
     @Override
     public HashJoinMemoryCalculator next() {
       Preconditions.checkState(initialized);
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/limit/PartitionLimitRecordBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/limit/PartitionLimitRecordBatch.java
index 3f90a3b..48264c6 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/limit/PartitionLimitRecordBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/limit/PartitionLimitRecordBatch.java
@@ -43,7 +43,8 @@
  * implicit column for rowId for each row.
  */
 public class PartitionLimitRecordBatch extends AbstractSingleRecordBatch<PartitionLimit> {
-  private static final Logger logger = LoggerFactory.getLogger(LimitRecordBatch.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(PartitionLimitRecordBatch.class);
 
   private final SelectionVector2 outgoingSv;
   private SelectionVector2 incomingSv;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/protocol/OperatorDriver.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/protocol/OperatorDriver.java
index d599f22..7193d1c 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/protocol/OperatorDriver.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/protocol/OperatorDriver.java
@@ -20,6 +20,8 @@
 import org.apache.drill.common.exceptions.UserException;
 import org.apache.drill.exec.ops.OperatorContext;
 import org.apache.drill.exec.record.RecordBatch.IterOutcome;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * State machine that drives the operator executable. Converts
@@ -42,6 +44,9 @@
  */
 
 public class OperatorDriver {
+
+  private static final Logger logger = LoggerFactory.getLogger(OperatorDriver.class);
+
   public enum State {
 
     /**
@@ -125,7 +130,7 @@
       case RUN:
         return doNext();
       default:
-        OperatorRecordBatch.logger.debug("Extra call to next() in state " + state + ": " + operatorLabel());
+        logger.debug("Extra call to next() in state {}: {}", state, operatorLabel());
         return IterOutcome.NONE;
       }
     } catch (UserException e) {
@@ -137,7 +142,7 @@
       state = State.FAILED;
       throw UserException.executionError(t)
         .addContext("Exception thrown from", operatorLabel())
-        .build(OperatorRecordBatch.logger);
+        .build(logger);
     }
   }
 
@@ -235,7 +240,7 @@
       }
     } catch (Throwable t) {
       // Ignore; we're already in a bad state.
-      OperatorRecordBatch.logger.error("Exception thrown from cancel() for " + operatorLabel(), t);
+      logger.error("Exception thrown from cancel() for {}", operatorLabel(), t);
     }
   }
 
@@ -254,7 +259,7 @@
     } catch (Throwable t) {
       throw UserException.executionError(t)
         .addContext("Exception thrown from", operatorLabel())
-        .build(OperatorRecordBatch.logger);
+        .build(logger);
     } finally {
       opContext.close();
       state = State.CLOSED;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/window/FrameSupportTemplate.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/window/FrameSupportTemplate.java
index 1e477ec..5288776 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/window/FrameSupportTemplate.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/window/FrameSupportTemplate.java
@@ -26,6 +26,8 @@
 import org.apache.drill.exec.record.VectorWrapper;
 import org.apache.drill.exec.vector.BaseDataValueVector;
 import org.apache.drill.exec.vector.ValueVector;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.inject.Named;
 import java.util.List;
@@ -37,7 +39,8 @@
  * This class will handle such functions even if the FRAME clause is not present.
  */
 public abstract class FrameSupportTemplate implements WindowFramer {
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(NoFrameSupportTemplate.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(FrameSupportTemplate.class);
 
   private VectorContainer container;
   private VectorContainer internal;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SortConfig.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SortConfig.java
index 1694743..2ecbcb9 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SortConfig.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SortConfig.java
@@ -20,9 +20,12 @@
 import org.apache.drill.common.config.DrillConfig;
 import org.apache.drill.exec.ExecConstants;
 import org.apache.drill.exec.server.options.OptionManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class SortConfig {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ExternalSortBatch.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(SortConfig.class);
 
   /**
    * Smallest allowed output batch size. The smallest output batch
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SortImpl.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SortImpl.java
index e2b0e37..e037600 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SortImpl.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SortImpl.java
@@ -39,6 +39,8 @@
 
 import org.apache.drill.shaded.guava.com.google.common.annotations.VisibleForTesting;
 import org.apache.drill.exec.vector.ValueVector;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.drill.exec.record.RecordBatch.IterOutcome.EMIT;
 
@@ -53,7 +55,8 @@
  */
 
 public class SortImpl {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ExternalSortBatch.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(SortImpl.class);
 
   /**
    * Iterates over the final sorted results. Implemented differently
@@ -378,8 +381,8 @@
 
   private void validateBatchSize(long actualBatchSize, long memoryDelta) {
     if (actualBatchSize != memoryDelta) {
-      ExternalSortBatch.logger.debug("Memory delta: {}, actual batch size: {}, Diff: {}",
-                   memoryDelta, actualBatchSize, memoryDelta - actualBatchSize);
+      logger.debug("Memory delta: {}, actual batch size: {}, Diff: {}",
+        memoryDelta, actualBatchSize, memoryDelta - actualBatchSize);
     }
   }
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SortMemoryManager.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SortMemoryManager.java
index 3a3ad0e..7bfc1d3 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SortMemoryManager.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/impl/xsort/SortMemoryManager.java
@@ -18,6 +18,8 @@
 package org.apache.drill.exec.physical.impl.xsort;
 
 import org.apache.drill.shaded.guava.com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Computes the memory needs for input batches, spill batches and merge
@@ -60,7 +62,7 @@
  * As a result, we can never be sure of the amount of memory needed for a
  * batch. So, we have to estimate based on a number of factors:
  * <ul>
- * <li>Uses the {@link RecordBatchSizer} to estimate the data size and
+ * <li>Uses the {@link org.apache.drill.exec.record.RecordBatchSizer} to estimate the data size and
  * buffer size of each incoming batch.</li>
  * <li>Estimates the internal fragmentation due to power-of-two rounding.</li>
  * <li>Configured preferences for spill and output batches.</li>
@@ -93,7 +95,8 @@
  */
 
 public class SortMemoryManager {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ExternalSortBatch.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(SortMemoryManager.class);
 
   /**
    * Estimate for typical internal fragmentation in a buffer due to power-of-two
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/SingleVectorState.java b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/SingleVectorState.java
index 60fb7f4..033e427 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/SingleVectorState.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/physical/resultSet/impl/SingleVectorState.java
@@ -30,6 +30,8 @@
 import org.apache.drill.exec.vector.accessor.impl.HierarchicalFormatter;
 import org.apache.drill.exec.vector.accessor.writer.OffsetVectorWriter;
 import org.apache.drill.exec.vector.accessor.writer.WriterEvents;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Base class for a single vector. Handles the bulk of work for that vector.
@@ -42,6 +44,8 @@
 
   public abstract static class SimpleVectorState extends SingleVectorState {
 
+    private static final Logger logger = LoggerFactory.getLogger(SimpleVectorState.class);
+
     public SimpleVectorState(WriterEvents writer,
         ValueVector mainVector) {
       super(writer, mainVector);
@@ -50,11 +54,11 @@
     @Override
     protected void copyOverflow(int sourceStartIndex, int sourceEndIndex) {
       int newIndex = 0;
-      ResultSetLoaderImpl.logger.trace("Vector {} of type {}: copy {} values from {} to {}",
-          mainVector.getField().toString(),
-          mainVector.getClass().getSimpleName(),
-          Math.max(0, sourceEndIndex - sourceStartIndex + 1),
-          sourceStartIndex, newIndex);
+      logger.trace("Vector {} of type {}: copy {} values from {} to {}",
+        mainVector.getField().toString(),
+        mainVector.getClass().getSimpleName(),
+        Math.max(0, sourceEndIndex - sourceStartIndex + 1),
+        sourceStartIndex, newIndex);
 
       // Copy overflow values from the full vector to the new
       // look-ahead vector. Uses vector-level operations for convenience.
@@ -139,6 +143,8 @@
 
   public static class OffsetVectorState extends SingleVectorState {
 
+    private static final Logger logger = LoggerFactory.getLogger(OffsetVectorState.class);
+
     /**
      * The child writer used to determine positions on overflow.
      * The repeated list vector defers creating the child until the
@@ -203,9 +209,9 @@
       UInt4Vector.Mutator destMutator = ((UInt4Vector) mainVector).getMutator();
       int offset = childWriter.rowStartIndex();
       int newIndex = 1;
-      ResultSetLoaderImpl.logger.trace("Offset vector: copy {} values from {} to {} with offset {}",
-          Math.max(0, sourceEndIndex - sourceStartIndex + 1),
-          sourceStartIndex, newIndex, offset);
+      logger.trace("Offset vector: copy {} values from {} to {} with offset {}",
+        Math.max(0, sourceEndIndex - sourceStartIndex + 1),
+        sourceStartIndex, newIndex, offset);
       assert offset == sourceAccessor.get(sourceStartIndex - 1);
 
       // Position zero is special and will be filled in by the writer
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PlannerPhase.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PlannerPhase.java
index 203150f..49b6d38 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PlannerPhase.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/PlannerPhase.java
@@ -103,7 +103,6 @@
  * Only rules which use DrillRelFactories should be used in this enum.
  */
 public enum PlannerPhase {
-  //private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(DrillRuleSets.class);
 
   LOGICAL_PRUNE_AND_JOIN("Logical Planning (with join and partition pruning)") {
     public RuleSet getRules(OptimizerRulesContext context, Collection<StoragePlugin> plugins) {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillRelOptUtil.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillRelOptUtil.java
index d761ba8..22248fb 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillRelOptUtil.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/common/DrillRelOptUtil.java
@@ -27,7 +27,6 @@
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.calcite.plan.RelOptPlanner;
 import org.apache.calcite.rex.RexFieldAccess;
 import org.apache.drill.metastore.statistics.TableStatisticsKind;
 import org.apache.drill.metastore.metadata.TableMetadata;
@@ -70,6 +69,8 @@
 import org.apache.drill.shaded.guava.com.google.common.collect.ImmutableMap;
 import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
 import org.apache.drill.shaded.guava.com.google.common.collect.Sets;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Utility class that is a subset of the RelOptUtil class and is a placeholder for Drill specific
@@ -77,6 +78,8 @@
  */
 public abstract class DrillRelOptUtil {
 
+  private static final Logger logger = LoggerFactory.getLogger(DrillRelOptUtil.class);
+
   final public static String IMPLICIT_COLUMN = "$drill_implicit_field$";
 
   // Similar to RelOptUtil.areRowTypesEqual() with the additional check for allowSubstring
@@ -603,7 +606,7 @@
             || (tableMetadata = table.getGroupScan().getTableMetadata()) == null
             || !TableStatisticsKind.HAS_DESCRIPTIVE_STATISTICS.getValue(tableMetadata);
       } catch (IOException e) {
-        RelOptPlanner.LOGGER.debug("Unable to obtain table metadata due to exception:", e);
+        logger.debug("Unable to obtain table metadata due to exception: {}", e.getMessage(), e);
         return true;
       }
     } else {
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/AbstractIndexDescriptor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/AbstractIndexDescriptor.java
index dd042da..d418157 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/AbstractIndexDescriptor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/AbstractIndexDescriptor.java
@@ -33,7 +33,6 @@
  *
  */
 public abstract class AbstractIndexDescriptor extends DrillIndexDefinition implements IndexDescriptor {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(AbstractIndexDescriptor .class);
 
   public AbstractIndexDescriptor(List<LogicalExpression> indexCols,
                                  CollationContext indexCollationContext,
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/DrillIndexDescriptor.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/DrillIndexDescriptor.java
index 3c2d21a..9f25896 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/DrillIndexDescriptor.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/DrillIndexDescriptor.java
@@ -25,12 +25,16 @@
 import org.apache.drill.exec.physical.base.IndexGroupScan;
 import org.apache.drill.exec.planner.cost.PluginCost;
 import org.apache.drill.exec.planner.logical.DrillTable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.List;
 
 public class DrillIndexDescriptor extends AbstractIndexDescriptor {
 
+  private static final Logger logger = LoggerFactory.getLogger(DrillIndexDescriptor.class);
+
   /**
    * The name of Drill's Storage Plugin on which the Index was stored
    */
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/generators/CoveringPlanNoFilterGenerator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/generators/CoveringPlanNoFilterGenerator.java
index e06ac8f..548fa31 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/generators/CoveringPlanNoFilterGenerator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/index/generators/CoveringPlanNoFilterGenerator.java
@@ -38,10 +38,15 @@
 import org.apache.drill.exec.planner.physical.Prule;
 import org.apache.drill.exec.planner.physical.Prel;
 import org.apache.drill.exec.physical.base.DbGroupScan;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.util.List;
 
 public class CoveringPlanNoFilterGenerator extends AbstractIndexPlanGenerator {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(CoveringIndexPlanGenerator.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(CoveringPlanNoFilterGenerator.class);
+
   final protected IndexGroupScan indexGroupScan;
   final protected IndexDescriptor indexDesc;
   final boolean isSingletonSortedStream;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/AbstractSqlSetHandler.java b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/AbstractSqlSetHandler.java
index 71d7636..e6bf370 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/AbstractSqlSetHandler.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/sql/handlers/AbstractSqlSetHandler.java
@@ -24,12 +24,15 @@
 import org.apache.drill.exec.server.options.OptionValue;
 import org.apache.drill.exec.server.options.QueryOptionManager;
 import org.apache.drill.exec.util.ImpersonationUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Base handler for SQL_SET kind statements.
  */
 abstract class AbstractSqlSetHandler extends AbstractSqlHandler {
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(AbstractSqlHandler.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(AbstractSqlSetHandler.class);
 
   final QueryContext context;
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/record/AbstractRecordBatch.java b/exec/java-exec/src/main/java/org/apache/drill/exec/record/AbstractRecordBatch.java
index 4a83046..f05b9f2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/record/AbstractRecordBatch.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/record/AbstractRecordBatch.java
@@ -29,7 +29,7 @@
 import org.apache.drill.exec.ops.OperatorContext;
 import org.apache.drill.exec.ops.OperatorStats;
 import org.apache.drill.exec.physical.base.PhysicalOperator;
-import org.apache.drill.exec.physical.impl.aggregate.SpilledRecordbatch;
+import org.apache.drill.exec.physical.impl.aggregate.SpilledRecordBatch;
 import org.apache.drill.exec.record.selection.SelectionVector2;
 import org.apache.drill.exec.record.selection.SelectionVector4;
 import org.apache.drill.exec.server.options.OptionValue;
@@ -124,7 +124,7 @@
       stats.startProcessing();
     }
 
-    if (b instanceof SpilledRecordbatch) {
+    if (b instanceof SpilledRecordBatch) {
       // Don't double count records which were already read and spilled.
       // TODO evaluate whether swapping out upstream record batch with a SpilledRecordBatch
       // is the right thing to do.
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/PersistedOptionValue.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/PersistedOptionValue.java
index 465c47f..d670c40 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/PersistedOptionValue.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/PersistedOptionValue.java
@@ -21,7 +21,6 @@
 import com.fasterxml.jackson.annotation.JsonInclude;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import com.fasterxml.jackson.core.JsonParser;
-import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.core.ObjectCodec;
 import com.fasterxml.jackson.databind.DeserializationContext;
 import com.fasterxml.jackson.databind.JavaType;
@@ -30,6 +29,8 @@
 import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
 import com.fasterxml.jackson.databind.deser.std.StdDeserializer;
 import org.apache.drill.shaded.guava.com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 
@@ -274,7 +275,7 @@
    */
   @SuppressWarnings("serial")
   public static class Deserializer extends StdDeserializer<PersistedOptionValue> {
-    private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(Deserializer.class);
+    private static final Logger logger = LoggerFactory.getLogger(Deserializer.class);
 
     private Deserializer() {
       super(PersistedOptionValue.class);
@@ -289,7 +290,7 @@
     }
 
     @Override
-    public PersistedOptionValue deserialize(JsonParser p, DeserializationContext ctxt) throws IOException, JsonProcessingException {
+    public PersistedOptionValue deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
       ObjectCodec oc = p.getCodec();
       JsonNode node = oc.readTree(p);
       String value = null;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/ThreadsResources.java b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/ThreadsResources.java
index d63deef..b8bc6f2 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/ThreadsResources.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/server/rest/ThreadsResources.java
@@ -32,7 +32,6 @@
 @Path("/threads")
 @RolesAllowed(DrillUserPrincipal.ADMIN_ROLE)
 public class ThreadsResources {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(MetricsResources.class);
 
   @Inject UserAuthEnabled authEnabled;
   @Inject SecurityContext sc;
@@ -42,5 +41,4 @@
   public Viewable getMetrics() {
     return ViewableWithPermissions.create(authEnabled.get(), "/rest/threads/threads.ftl", sc);
   }
-
 }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/ssl/SSLConfigBuilder.java b/exec/java-exec/src/main/java/org/apache/drill/exec/ssl/SSLConfigBuilder.java
index 1174e64..7571e6b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ssl/SSLConfigBuilder.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ssl/SSLConfigBuilder.java
@@ -24,12 +24,8 @@
 
 import java.util.Properties;
 
-
 public class SSLConfigBuilder {
 
-  private static final org.slf4j.Logger logger =
-      org.slf4j.LoggerFactory.getLogger(org.apache.drill.exec.ssl.SSLConfigBuilder.class);
-
   private DrillConfig config = null;
   private Configuration hadoopConfig = null;
   private Properties properties;
@@ -37,10 +33,6 @@
   private boolean initializeSSLContext = false;
   private boolean validateKeyStore = false;
 
-  public SSLConfigBuilder() {
-
-  }
-
   public SSLConfig build() throws DrillException {
     if (mode == SSLConfig.Mode.SERVER && config == null) {
       throw new DrillConfigurationException(
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/reader/RepeatedVarCharOutput.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/reader/RepeatedVarCharOutput.java
index fdf3e53..57469ed 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/reader/RepeatedVarCharOutput.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/easy/text/reader/RepeatedVarCharOutput.java
@@ -32,7 +32,7 @@
  */
 public class RepeatedVarCharOutput extends BaseFieldOutput {
 
-  private static final Logger logger = LoggerFactory.getLogger(BaseFieldOutput.class);
+  private static final Logger logger = LoggerFactory.getLogger(RepeatedVarCharOutput.class);
 
   private final ScalarWriter columnWriter;
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatPlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatPlugin.java
index 4785da1..63eeb7a 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatPlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogFormatPlugin.java
@@ -62,7 +62,8 @@
 
 public class HttpdLogFormatPlugin extends EasyFormatPlugin<HttpdLogFormatConfig> {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HttpdLogFormatPlugin.class);
+  private static final Logger logger = LoggerFactory.getLogger(HttpdLogFormatPlugin.class);
+
   private static final String PLUGIN_EXTENSION = "httpd";
   private static final int VECTOR_MEMORY_ALLOCATION = 4095;
 
@@ -79,12 +80,12 @@
   }
 
   @Override
-  public TableStatistics readStatistics(FileSystem fs, Path statsTablePath) throws IOException {
+  public TableStatistics readStatistics(FileSystem fs, Path statsTablePath) {
     throw new UnsupportedOperationException("unimplemented");
   }
 
   @Override
-  public void writeStatistics(TableStatistics statistics, FileSystem fs, Path statsTablePath) throws IOException {
+  public void writeStatistics(TableStatistics statistics, FileSystem fs, Path statsTablePath) {
     throw new UnsupportedOperationException("unimplemented");
   }
 
@@ -123,7 +124,7 @@
           String parserField = HttpdParser.parserFormattedFieldName(drillField);
           fieldMapping.put(drillField, parserField);
         } catch (Exception e) {
-          LOG.info("Putting field: " + drillField + " into map", e);
+          logger.info("Putting field: {} into map", drillField, e);
         }
       }
       return fieldMapping;
@@ -132,7 +133,7 @@
     @Override
     public void setup(final OperatorContext context, final OutputMutator output) throws ExecutionSetupException {
       try {
-        /**
+        /*
          * Extract the list of field names for the parser to use if it is NOT a star query. If it is a star query just
          * pass through an empty map, because the parser is going to have to build all possibilities.
          */
@@ -166,7 +167,7 @@
               .addContext("Split Start", work.getStart())
               .addContext("Split Length", work.getLength())
               .addContext("Local Line Number", lineNumber.get())
-              .build(LOG);
+              .build(logger);
     }
 
     /**
@@ -203,7 +204,7 @@
           lineReader.close();
         }
       } catch (IOException e) {
-        LOG.warn("Failure while closing Httpd reader.", e);
+        logger.warn("Failure while closing Httpd reader.", e);
       }
     }
 
@@ -229,12 +230,12 @@
   }
 
   @Override
-  public RecordReader getRecordReader(final FragmentContext context, final DrillFileSystem dfs, final FileWork fileWork, final List<SchemaPath> columns, final String userName) throws ExecutionSetupException {
+  public RecordReader getRecordReader(final FragmentContext context, final DrillFileSystem dfs, final FileWork fileWork, final List<SchemaPath> columns, final String userName) {
     return new HttpdLogRecordReader(context, dfs, fileWork, columns);
   }
 
   @Override
-  public RecordWriter getRecordWriter(final FragmentContext context, final EasyWriter writer) throws IOException {
+  public RecordWriter getRecordWriter(final FragmentContext context, final EasyWriter writer) {
     throw new UnsupportedOperationException("Drill doesn't currently support writing HTTPd logs");
   }
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogRecord.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogRecord.java
index 95917cb..45c251d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogRecord.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdLogRecord.java
@@ -40,7 +40,8 @@
 
 public class HttpdLogRecord {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HttpdLogRecord.class);
+  private static final Logger logger = LoggerFactory.getLogger(HttpdLogRecord.class);
+
   private final Map<String, VarCharWriter> strings = Maps.newHashMap();
   private final Map<String, BigIntWriter> longs = Maps.newHashMap();
   private final Map<String, Float8Writer> doubles = Maps.newHashMap();
@@ -98,10 +99,10 @@
     if (value != null) {
       final VarCharWriter w = strings.get(field);
       if (w != null) {
-        LOG.trace("Parsed field: {}, as string: {}", field, value);
+        logger.trace("Parsed field: {}, as string: {}", field, value);
         writeString(w, value);
       } else {
-        LOG.warn("No 'string' writer found for field: {}", field);
+        logger.warn("No 'string' writer found for field: {}", field);
       }
     }
   }
@@ -118,10 +119,10 @@
     if (value != null) {
       final BigIntWriter w = longs.get(field);
       if (w != null) {
-        LOG.trace("Parsed field: {}, as long: {}", field, value);
+        logger.trace("Parsed field: {}, as long: {}", field, value);
         w.writeBigInt(value);
       } else {
-        LOG.warn("No 'long' writer found for field: {}", field);
+        logger.warn("No 'long' writer found for field: {}", field);
       }
     }
   }
@@ -147,10 +148,10 @@
       }
       final TimeStampWriter tw = times.get(field);
       if (tw != null) {
-        LOG.trace("Parsed field: {}, as time: {}", field, value);
+        logger.trace("Parsed field: {}, as time: {}", field, value);
         tw.writeTimeStamp(ts);
       } else {
-        LOG.warn("No 'timestamp' writer found for field: {}", field);
+        logger.warn("No 'timestamp' writer found for field: {}", field);
       }
     }
   }
@@ -167,10 +168,10 @@
     if (value != null) {
       final Float8Writer w = doubles.get(field);
       if (w != null) {
-        LOG.trace("Parsed field: {}, as double: {}", field, value);
+        logger.trace("Parsed field: {}, as double: {}", field, value);
         w.writeFloat8(value);
       } else {
-        LOG.warn("No 'double' writer found for field: {}", field);
+        logger.warn("No 'double' writer found for field: {}", field);
       }
     }
   }
@@ -187,7 +188,7 @@
   public void setWildcard(String field, String value) {
     if (value != null) {
       final MapWriter mapWriter = getWildcardWriter(field);
-      LOG.trace("Parsed wildcard field: {}, as string: {}", field, value);
+      logger.trace("Parsed wildcard field: {}, as string: {}", field, value);
       final VarCharWriter w = mapWriter.varChar(cleanExtensions.get(field));
       writeString(w, value);
     }
@@ -205,7 +206,7 @@
   public void setWildcard(String field, Long value) {
     if (value != null) {
       final MapWriter mapWriter = getWildcardWriter(field);
-      LOG.trace("Parsed wildcard field: {}, as long: {}", field, value);
+      logger.trace("Parsed wildcard field: {}, as long: {}", field, value);
       final BigIntWriter w = mapWriter.bigInt(cleanExtensions.get(field));
       w.writeBigInt(value);
     }
@@ -223,7 +224,7 @@
   public void setWildcard(String field, Double value) {
     if (value != null) {
       final MapWriter mapWriter = getWildcardWriter(field);
-      LOG.trace("Parsed wildcard field: {}, as double: {}", field, value);
+      logger.trace("Parsed wildcard field: {}, as double: {}", field, value);
       final Float8Writer w = mapWriter.float8(cleanExtensions.get(field));
       w.writeFloat8(value);
     }
@@ -253,7 +254,7 @@
             final String extension = field.substring(root.length() + 1);
             final String cleanExtension = HttpdParser.drillFormattedFieldName(extension);
             cleanExtensions.put(field, cleanExtension);
-            LOG.debug("Added extension: field='{}' with cleanExtension='{}'", field, cleanExtension);
+            logger.debug("Added extension: field='{}' with cleanExtension='{}'", field, cleanExtension);
           }
 
           /**
@@ -264,7 +265,7 @@
             /**
              * Start and store this root map writer for later retrieval.
              */
-            LOG.debug("Starting new wildcard field writer: {}", field);
+            logger.debug("Starting new wildcard field writer: {}", field);
             writer.start();
             startedWildcards.put(field, writer);
             wildcardWriters.put(root, writer);
@@ -318,21 +319,21 @@
      */
     if (hasWildcard) {
       final String cleanName = parserFieldName.substring(0, parserFieldName.length() - HttpdParser.PARSER_WILDCARD.length());
-      LOG.debug("Adding WILDCARD parse target: {} as {}, with field name: {}", parserFieldName, cleanName, drillFieldName);
+      logger.debug("Adding WILDCARD parse target: {} as {}, with field name: {}", parserFieldName, cleanName, drillFieldName);
       parser.addParseTarget(this.getClass().getMethod("setWildcard", String.class, String.class), parserFieldName);
       parser.addParseTarget(this.getClass().getMethod("setWildcard", String.class, Double.class), parserFieldName);
       parser.addParseTarget(this.getClass().getMethod("setWildcard", String.class, Long.class), parserFieldName);
       wildcards.put(cleanName, mapWriter.map(drillFieldName));
     } else if (type.contains(Casts.DOUBLE)) {
-      LOG.debug("Adding DOUBLE parse target: {}, with field name: {}", parserFieldName, drillFieldName);
+      logger.debug("Adding DOUBLE parse target: {}, with field name: {}", parserFieldName, drillFieldName);
       parser.addParseTarget(this.getClass().getMethod("set", String.class, Double.class), parserFieldName);
       doubles.put(parserFieldName, mapWriter.float8(drillFieldName));
     } else if (type.contains(Casts.LONG)) {
-      LOG.debug("Adding LONG parse target: {}, with field name: {}", parserFieldName, drillFieldName);
+      logger.debug("Adding LONG parse target: {}, with field name: {}", parserFieldName, drillFieldName);
       parser.addParseTarget(this.getClass().getMethod("set", String.class, Long.class), parserFieldName);
       longs.put(parserFieldName, mapWriter.bigInt(drillFieldName));
     } else {
-      LOG.debug("Adding STRING parse target: {}, with field name: {}", parserFieldName, drillFieldName);
+      logger.debug("Adding STRING parse target: {}, with field name: {}", parserFieldName, drillFieldName);
       if (parserFieldName.startsWith("TIME.STAMP:")) {
         parser.addParseTarget(this.getClass().getMethod("setTimestamp", String.class, String.class), parserFieldName);
         times.put(parserFieldName, mapWriter.timeStamp(drillFieldName));
@@ -342,4 +343,4 @@
       }
     }
   }
-}
\ No newline at end of file
+}
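
The httpd changes above also switch from string concatenation to SLF4J parameterized logging. One behavior worth noting: when the arguments outnumber the {} placeholders and the last argument is a Throwable, SLF4J logs it as an attached exception with its stack trace, which is what the new logger.info("Putting field: {} into map", drillField, e) call relies on. A small sketch of both points, under the SLF4J API used in this patch:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingSketch {

      private static final Logger logger = LoggerFactory.getLogger(LoggingSketch.class);

      void handle(String field, Exception e) {
        // One placeholder, two extra arguments: 'field' fills {} and 'e',
        // being a trailing Throwable, is logged with its stack trace.
        logger.info("Putting field: {} into map", field, e);

        // The placeholder form also defers message construction until the
        // logger confirms the level is enabled, unlike "..." + field.
        logger.trace("Parsed field: {}", field);
      }
    }
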
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdParser.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdParser.java
index 5d3d7c0..7da7a95 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdParser.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/httpd/HttpdParser.java
@@ -37,7 +37,8 @@
 
 public class HttpdParser {
 
-  private static final Logger LOG = LoggerFactory.getLogger(HttpdParser.class);
+  private static final Logger logger = LoggerFactory.getLogger(HttpdParser.class);
+
   public static final String PARSER_WILDCARD = ".*";
   public static final String SAFE_WILDCARD = "_$";
   public static final String SAFE_SEPARATOR = "_";
@@ -306,10 +307,10 @@
     setupParser(mapWriter, logFormat, fieldMapping);
 
     if (timestampFormat != null && !timestampFormat.trim().isEmpty()) {
-      LOG.info("Custom timestamp format has been specified. This is an informational note only as custom timestamps is rather unusual.");
+      logger.info("Custom timestamp format has been specified. This is an informational note only as custom timestamps is rather unusual.");
     }
     if (logFormat.contains("\n")) {
-      LOG.info("Specified logformat is a multiline log format: {}", logFormat);
+      logger.info("Specified logformat is a multiline log format: {}", logFormat);
     }
   }
 
@@ -335,7 +336,7 @@
    * @param fieldType HTTP.URI, etc..
    */
   private void addTypeRemapping(final Parser<HttpdLogRecord> parser, final String fieldName, final String fieldType) {
-    LOG.debug("Adding type remapping - fieldName: {}, fieldType: {}", fieldName, fieldType);
+    logger.debug("Adding type remapping - fieldName: {}, fieldType: {}", fieldName, fieldType);
     parser.addTypeRemapping(fieldName, fieldType);
   }
 
@@ -394,13 +395,13 @@
     final Map<String, String> requestedPaths;
     final List<String> allParserPaths = parser.getPossiblePaths();
     if (fieldMapping != null && !fieldMapping.isEmpty()) {
-      LOG.debug("Using fields defined by user.");
+      logger.debug("Using fields defined by user.");
       requestedPaths = fieldMapping;
     } else {
       /**
        * Use all possible paths that the parser has determined from the specified log format.
        */
-      LOG.debug("No fields defined by user, defaulting to all possible fields.");
+      logger.debug("No fields defined by user, defaulting to all possible fields.");
       requestedPaths = Maps.newHashMap();
       for (final String parserPath : allParserPaths) {
         requestedPaths.put(drillFormattedFieldName(parserPath), parserPath);
@@ -433,7 +434,7 @@
         casts = dummy.getCasts(entry.getValue());
       }
 
-      LOG.debug("Setting up drill field: {}, parser field: {}, which casts as: {}", entry.getKey(), entry.getValue(), casts);
+      logger.debug("Setting up drill field: {}, parser field: {}, which casts as: {}", entry.getKey(), entry.getValue(), casts);
       record.addField(parser, mapWriter, casts, entry.getValue(), entry.getKey());
     }
   }
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java
index d9d8a95..644a3be 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/mock/MockSubScanPOP.java
@@ -42,7 +42,6 @@
 
 @JsonTypeName("mock-sub-scan")
 public class MockSubScanPOP extends AbstractBase implements SubScan {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(MockGroupScanPOP.class);
 
   private final String url;
   protected final List<MockScanEntry> readEntries;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/InMemoryStore.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/InMemoryStore.java
index 9b76d88..abf317d 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/InMemoryStore.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/store/InMemoryStore.java
@@ -29,7 +29,6 @@
 import org.apache.drill.shaded.guava.com.google.common.collect.Iterables;
 
 public class InMemoryStore<V> extends BasePersistentStore<V> {
-  // private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(InMemoryPersistentStore.class);
 
   private final ConcurrentNavigableMap<String, V> store;
   private final int capacity;
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/VectorOutput.java b/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/VectorOutput.java
index b65e6c2..835a873 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/VectorOutput.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/vector/complex/fn/VectorOutput.java
@@ -56,7 +56,8 @@
 
 abstract class VectorOutput {
 
-  private static final Logger LOG = LoggerFactory.getLogger(VectorOutput.class);
+  private static final Logger logger = LoggerFactory.getLogger(VectorOutput.class);
+
   final VarBinaryHolder binary = new VarBinaryHolder();
   final TimeHolder time = new TimeHolder();
   final DateHolder date = new DateHolder();
@@ -95,7 +96,7 @@
         if(checkNextToken(JsonToken.VALUE_NUMBER_INT) || !hasBinary()) {
           throw UserException.parseError()
           .message("Either $type is not an integer or has no $binary")
-          .build(LOG);
+          .build(logger);
         }
         writeBinary(checkNextToken(JsonToken.VALUE_STRING));
         checkCurrentToken(JsonToken.END_OBJECT);
@@ -210,7 +211,7 @@
           if (type < 0 || type > 255) {
             throw UserException.validationError()
             .message("$type should be between 0 to 255")
-            .build(LOG);
+            .build(logger);
           }
         }
         work.prepareBinary(binaryData, binary);
@@ -254,7 +255,7 @@
         default:
           throw UserException.unsupportedError()
               .message(parser.getCurrentToken().toString())
-              .build(LOG);
+              .build(logger);
         }
       }
     }
@@ -312,7 +313,7 @@
           if (type < 0 || type > 255) {
             throw UserException.validationError()
             .message("$type should be between 0 to 255")
-            .build(LOG);
+            .build(logger);
           }
         }
         work.prepareBinary(binaryData, binary);
@@ -357,7 +358,7 @@
         default:
           throw UserException.unsupportedError()
           .message(parser.getCurrentToken().toString())
-          .build(LOG);
+          .build(logger);
         }
       }
     }
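
The build(LOG) to build(logger) edits in VectorOutput are not purely cosmetic: Drill's UserException builder logs the exception through the logger it is handed, so a logger named for the wrong class misattributes the error in the logs. A sketch of the pattern, assuming the builder API exactly as it appears in this diff (ValidationSketch is a hypothetical class):

    import org.apache.drill.common.exceptions.UserException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ValidationSketch {

      private static final Logger logger = LoggerFactory.getLogger(ValidationSketch.class);

      void checkType(int type) {
        if (type < 0 || type > 255) {
          // build(logger) constructs the UserException and logs it against
          // ValidationSketch, the class that actually raised it.
          throw UserException.validationError()
              .message("$type should be between 0 and 255")
              .build(logger);
        }
      }
    }
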
diff --git a/exec/java-exec/src/main/java/org/apache/parquet/hadoop/ParquetColumnChunkPageWriteStore.java b/exec/java-exec/src/main/java/org/apache/parquet/hadoop/ParquetColumnChunkPageWriteStore.java
index d7d0896..01a5485 100644
--- a/exec/java-exec/src/main/java/org/apache/parquet/hadoop/ParquetColumnChunkPageWriteStore.java
+++ b/exec/java-exec/src/main/java/org/apache/parquet/hadoop/ParquetColumnChunkPageWriteStore.java
@@ -26,7 +26,6 @@
 import org.apache.drill.shaded.guava.com.google.common.collect.Lists;
 import org.apache.drill.shaded.guava.com.google.common.collect.Maps;
 import org.apache.drill.shaded.guava.com.google.common.collect.Sets;
-import org.apache.drill.exec.store.parquet.ParquetDirectByteBufferAllocator;
 import org.apache.parquet.bytes.BytesInput;
 import org.apache.parquet.bytes.CapacityByteArrayOutputStream;
 import org.apache.parquet.column.ColumnDescriptor;
@@ -40,6 +39,8 @@
 import org.apache.parquet.io.ParquetEncodingException;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.bytes.ByteBufferAllocator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This is a copy of ColumnChunkPageWriteStore from parquet library except of OutputStream that is used here.
@@ -47,7 +48,8 @@
  * It will be no need in this class once PARQUET-1006 is resolved.
  */
 public class ParquetColumnChunkPageWriteStore implements PageWriteStore, Closeable {
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(ParquetDirectByteBufferAllocator.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(ParquetColumnChunkPageWriteStore.class);
 
   private static ParquetMetadataConverter parquetMetadataConverter = new ParquetMetadataConverter();
 
diff --git a/exec/java-exec/src/test/java/org/apache/drill/TestDisabledFunctionality.java b/exec/java-exec/src/test/java/org/apache/drill/TestDisabledFunctionality.java
index 1dcd691..be2f1f4 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/TestDisabledFunctionality.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/TestDisabledFunctionality.java
@@ -31,7 +31,6 @@
 
 @Category(UnlikelyTest.class)
 public class TestDisabledFunctionality extends BaseTestQuery {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestExampleQueries.class);
 
   @Test(expected = UserException.class)  // see DRILL-2054
   public void testBooleanORWhereClause() throws Exception {
@@ -353,4 +352,4 @@
     }
   }
 
-}
\ No newline at end of file
+}
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/common/HashPartitionTest.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/common/HashPartitionTest.java
index ce79467..e98b4c0 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/common/HashPartitionTest.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/common/HashPartitionTest.java
@@ -33,7 +33,7 @@
 import org.apache.drill.exec.ops.OperatorContext;
 import org.apache.drill.exec.physical.config.HashJoinPOP;
 import org.apache.drill.exec.physical.impl.MockRecordBatch;
-import org.apache.drill.exec.physical.impl.aggregate.SpilledRecordbatch;
+import org.apache.drill.exec.physical.impl.aggregate.SpilledRecordBatch;
 import org.apache.drill.exec.physical.impl.join.HashJoinMemoryCalculator;
 import org.apache.drill.exec.physical.impl.join.HashJoinMemoryCalculatorImpl;
 import org.apache.drill.exec.physical.impl.join.JoinUtils;
@@ -227,7 +227,7 @@
         final int batchesCount = hashPartition.getPartitionBatchesCount();
         hashPartition.closeWriter();
 
-        SpilledRecordbatch spilledBuildBatch = new SpilledRecordbatch(spillFile, batchesCount, context, buildSchema, operatorContext, spillSet);
+        SpilledRecordBatch spilledBuildBatch = new SpilledRecordBatch(spillFile, batchesCount, context, buildSchema, operatorContext, spillSet);
         final RowSet actual = DirectRowSet.fromContainer(spilledBuildBatch.getContainer());
 
         new RowSetComparison(actualBuildRowSet).verify(actual);
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestLateralJoinCorrectnessBatchProcessing.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestLateralJoinCorrectnessBatchProcessing.java
index 1abe68e..bd67320 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestLateralJoinCorrectnessBatchProcessing.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestLateralJoinCorrectnessBatchProcessing.java
@@ -48,7 +48,6 @@
 import static org.junit.Assert.assertTrue;
 
 public class TestLateralJoinCorrectnessBatchProcessing extends SubOperatorTest {
-  //private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestNewLateralJoinCorrectness.class);
 
   // Operator Context for mock batch
   private static OperatorContext operatorContext;
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestMergeJoin.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestMergeJoin.java
index 1a0658c..5034ed7 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestMergeJoin.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/join/TestMergeJoin.java
@@ -33,7 +33,6 @@
 import org.apache.drill.exec.physical.base.FragmentRoot;
 import org.apache.drill.exec.physical.impl.ImplCreator;
 import org.apache.drill.exec.physical.impl.SimpleRootExec;
-import org.apache.drill.exec.physical.impl.aggregate.HashAggBatch;
 import org.apache.drill.exec.planner.PhysicalPlanReader;
 import org.apache.drill.exec.planner.PhysicalPlanReaderTestFactory;
 import org.apache.drill.exec.pop.PopUnitTestBase;
@@ -55,10 +54,13 @@
 
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @Category({SlowTest.class, OperatorTest.class})
 public class TestMergeJoin extends PopUnitTestBase {
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(HashAggBatch.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(TestMergeJoin.class);
   private final DrillConfig c = DrillConfig.create();
 
   @Test
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/protocol/TestOperatorRecordBatch.java b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/protocol/TestOperatorRecordBatch.java
index e35f7f6..d116e25 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/protocol/TestOperatorRecordBatch.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/physical/impl/protocol/TestOperatorRecordBatch.java
@@ -53,6 +53,8 @@
 import org.apache.drill.test.SubOperatorTest;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test the implementation of the Drill Volcano iterator protocol that
@@ -61,7 +63,8 @@
 
 @Category(RowSetTests.class)
 public class TestOperatorRecordBatch extends SubOperatorTest {
-  private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(SubOperatorTest.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(TestOperatorRecordBatch.class);
 
   /**
    * Mock operator executor that simply tracks each method call
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestPauseInjection.java b/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestPauseInjection.java
index ca67f0c..187dfb7 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestPauseInjection.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/testing/TestPauseInjection.java
@@ -17,6 +17,7 @@
  */
 package org.apache.drill.exec.testing;
 
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -39,6 +40,7 @@
 import org.apache.drill.exec.util.Pointer;
 import org.junit.Test;
 import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestPauseInjection extends BaseTestQuery {
 
@@ -53,10 +55,10 @@
   /**
    * Class whose methods we want to simulate pauses at run-time for testing
    * purposes. The class must have access to {@link org.apache.drill.exec.ops.QueryContext} or
-   * {@link FragmentContextImpl}.
+   * {@link org.apache.drill.exec.ops.FragmentContextImpl}.
    */
   private static class DummyClass {
-    private static final Logger logger = org.slf4j.LoggerFactory.getLogger(DummyClass.class);
+    private static final Logger logger = LoggerFactory.getLogger(DummyClass.class);
     private static final ControlsInjector injector = ControlsInjectorFactory.getInjector(DummyClass.class);
 
     private final QueryContext context;
@@ -136,7 +138,7 @@
     final long actualDuration = dummyClass.pauses();
     assertTrue(String.format("Test should stop for at least %d milliseconds.", expectedDuration),
       expectedDuration <= actualDuration);
-    assertTrue("No exception should be thrown.", ex.value == null);
+    assertNull("No exception should be thrown.", ex.value);
     try {
       queryContext.close();
     } catch (final Exception e) {
@@ -164,7 +166,7 @@
     final long actualDuration = dummyClass.pauses();
     assertTrue(String.format("Test should stop for at least %d milliseconds.", expectedDuration),
       expectedDuration <= actualDuration);
-    assertTrue("No exception should be thrown.", ex.value == null);
+    assertNull("No exception should be thrown.", ex.value);
     try {
       queryContext.close();
     } catch (final Exception e) {
@@ -218,7 +220,7 @@
         final DummyClass dummyClass = new DummyClass(queryContext, trigger);
         final long actualDuration = dummyClass.pauses();
         assertTrue(String.format("Test should stop for at least %d milliseconds.", expectedDuration), expectedDuration <= actualDuration);
-        assertTrue("No exception should be thrown.", ex.value == null);
+        assertNull("No exception should be thrown.", ex.value);
         try {
           queryContext.close();
         } catch (final Exception e) {
@@ -291,7 +293,7 @@
         final DummyClass dummyClass = new DummyClass(queryContext, trigger);
         final long actualDuration = dummyClass.pauses();
         assertTrue(String.format("Test should stop for at least %d milliseconds.", expectedDuration), expectedDuration <= actualDuration);
-        assertTrue("No exception should be thrown.", ex.value == null);
+        assertNull("No exception should be thrown.", ex.value);
         try {
           queryContext.close();
         } catch (final Exception e) {
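
The assertTrue("...", ex.value == null) to assertNull("...", ex.value) changes in TestPauseInjection improve failure diagnostics: assertNull reports the unexpected non-null value in its failure message, while assertTrue can only echo the supplied message. A hypothetical JUnit 4 sketch of the difference:

    import static org.junit.Assert.assertNull;
    import static org.junit.Assert.assertTrue;

    import org.junit.Test;

    public class AssertNullSketch {

      @Test
      public void preferAssertNull() {
        Exception ex = null;
        // On failure this reports "expected null, but was:<...>",
        // naming the offending value.
        assertNull("No exception should be thrown.", ex);
        // On failure this reports only the supplied message, hiding the value.
        assertTrue("No exception should be thrown.", ex == null);
      }
    }
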
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/util/MiniZooKeeperCluster.java b/exec/java-exec/src/test/java/org/apache/drill/exec/util/MiniZooKeeperCluster.java
index a5c1fa0..ccdd7d5 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/util/MiniZooKeeperCluster.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/util/MiniZooKeeperCluster.java
@@ -27,18 +27,19 @@
 import java.util.List;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.zookeeper.server.NIOServerCnxnFactory;
 import org.apache.zookeeper.server.ZooKeeperServer;
 import org.apache.zookeeper.server.persistence.FileTxnLog;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Mostly Copied from HBase's MiniZooKeeperCluster, but without the Hadoop dependency.
  */
 public class MiniZooKeeperCluster {
-  private static final Log LOG = LogFactory.getLog(MiniZooKeeperCluster.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(MiniZooKeeperCluster.class);
 
   private static final int TICK_TIME = 2000;
   private static final int CONNECTION_TIMEOUT = 10000;
@@ -64,9 +65,9 @@
     this.started = false;
 //    this.configuration = configuration;
     activeZKServerIndex = -1;
-    zooKeeperServers = new ArrayList<ZooKeeperServer>();
-    clientPortList = new ArrayList<Integer>();
-    standaloneServerFactoryList = new ArrayList<NIOServerCnxnFactory>();
+    zooKeeperServers = new ArrayList<>();
+    clientPortList = new ArrayList<>();
+    standaloneServerFactoryList = new ArrayList<>();
   }
 
   public void setDefaultClientPort(int clientPort) {
@@ -153,7 +154,7 @@
           standaloneServerFactory = new NIOServerCnxnFactory();
           standaloneServerFactory.configure(new InetSocketAddress(tentativePort), 1000);
         } catch (BindException e) {
-          LOG.debug("Failed binding ZK Server to client port: " + tentativePort);
+          logger.debug("Failed binding ZK Server to client port: {}", tentativePort);
           // This port is already in use, try to use another.
           tentativePort++;
           continue;
@@ -164,7 +165,7 @@
         try {
           standaloneServerFactory.startup(server);
         } catch (IOException e) {
-          LOG.error("Zookeeper startup error", e);
+          logger.error("Zookeeper startup error", e);
           tentativePort++;
           continue;
         }
@@ -189,8 +190,8 @@
     activeZKServerIndex = 0;
     started = true;
     clientPort = clientPortList.get(activeZKServerIndex);
-    LOG.info("Started MiniZK Cluster and connect 1 ZK server " +
-      "on client port: " + clientPort);
+    logger.info("Started MiniZK Cluster and connect 1 ZK server " +
+      "on client port: {}", clientPort);
     return clientPort;
   }
 
@@ -231,7 +232,7 @@
     clientPortList.clear();
     zooKeeperServers.clear();
 
-    LOG.info("Shutdown MiniZK cluster with all ZK servers");
+    logger.info("Shutdown MiniZK cluster with all ZK servers");
   }
 
   /**
@@ -260,16 +261,16 @@
     standaloneServerFactoryList.remove(activeZKServerIndex);
     clientPortList.remove(activeZKServerIndex);
     zooKeeperServers.remove(activeZKServerIndex);
-    LOG.info("Kill the current active ZK servers in the cluster " +
-      "on client port: " + clientPort);
+    logger.info("Kill the current active ZK servers in the cluster " +
+      "on client port: {}", clientPort);
 
     if (standaloneServerFactoryList.size() == 0) {
       // there is no backup servers;
       return -1;
     }
     clientPort = clientPortList.get(activeZKServerIndex);
-    LOG.info("Activate a backup zk server in the cluster " +
-      "on client port: " + clientPort);
+    logger.info("Activate a backup zk server in the cluster " +
+      "on client port: {}", clientPort);
     // return the next back zk server's port
     return clientPort;
   }
@@ -302,8 +303,8 @@
     standaloneServerFactoryList.remove(backupZKServerIndex);
     clientPortList.remove(backupZKServerIndex);
     zooKeeperServers.remove(backupZKServerIndex);
-    LOG.info("Kill one backup ZK servers in the cluster " +
-      "on client port: " + clientPort);
+    logger.info("Kill one backup ZK servers in the cluster " +
+      "on client port: {}", clientPort);
   }
 
   // XXX: From o.a.zk.t.ClientBase
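
MiniZooKeeperCluster additionally migrates from commons-logging to SLF4J, replacing string concatenation with {} placeholders so messages are only formatted when the level is enabled. A before/after sketch of the pattern, using the port-binding message from this diff:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class PortBindSketch {

      private static final Logger logger = LoggerFactory.getLogger(PortBindSketch.class);

      void report(int tentativePort) {
        // Before (commons-logging): the argument is always concatenated,
        // even when DEBUG is disabled:
        //   LOG.debug("Failed binding ZK Server to client port: " + tentativePort);

        // After (SLF4J): formatting is deferred until the level check passes.
        logger.debug("Failed binding ZK Server to client port: {}", tentativePort);
      }
    }
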
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/vector/complex/writer/TestComplexTypeWriter.java b/exec/java-exec/src/test/java/org/apache/drill/exec/vector/complex/writer/TestComplexTypeWriter.java
index e9af52f..e80b9f0 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/vector/complex/writer/TestComplexTypeWriter.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/vector/complex/writer/TestComplexTypeWriter.java
@@ -20,8 +20,7 @@
 import org.apache.drill.test.BaseTestQuery;
 import org.junit.Test;
 
-public class TestComplexTypeWriter  extends BaseTestQuery{
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestComplexTypeReader.class);
+public class TestComplexTypeWriter extends BaseTestQuery {
 
   @Test
   //basic case. convert varchar into json.
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/DrillTestWrapper.java b/exec/java-exec/src/test/java/org/apache/drill/test/DrillTestWrapper.java
index d943d75..2fa8c75 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/test/DrillTestWrapper.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/DrillTestWrapper.java
@@ -59,6 +59,8 @@
 import org.apache.drill.exec.vector.ValueVector;
 import org.apache.drill.test.rowSet.RowSetComparison;
 import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An object to encapsulate the options for a Drill unit test, as well as the execution methods to perform the tests and
@@ -68,7 +70,8 @@
  * the BaseTestQuery class, and instance of the builder is accessible through the testBuilder() method.
  */
 public class DrillTestWrapper {
-  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(BaseTestQuery.class);
+
+  private static final Logger logger = LoggerFactory.getLogger(DrillTestWrapper.class);
 
   public interface TestServices {
     BufferAllocator allocator();
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillConnectionImpl.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillConnectionImpl.java
index 723fb9e..59b28ae 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillConnectionImpl.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillConnectionImpl.java
@@ -69,6 +69,7 @@
 import org.slf4j.Logger;
 
 import org.apache.drill.shaded.guava.com.google.common.base.Throwables;
+import org.slf4j.LoggerFactory;
 
 import static org.apache.drill.exec.util.StoragePluginTestUtils.DFS_PLUGIN_NAME;
 import static org.apache.drill.exec.util.StoragePluginTestUtils.ROOT_SCHEMA;
@@ -85,10 +86,9 @@
 // (Was abstract to avoid errors _here_ if newer versions of JDBC added
 // interface methods, but now newer versions would probably use Java 8's default
 // methods for compatibility.)
-public class DrillConnectionImpl extends AvaticaConnection
-                          implements DrillConnection {
-  private static final org.slf4j.Logger logger =
-      org.slf4j.LoggerFactory.getLogger(DrillConnection.class);
+public class DrillConnectionImpl extends AvaticaConnection implements DrillConnection {
+
+  private static final Logger logger = LoggerFactory.getLogger(DrillConnectionImpl.class);
 
   final DrillStatementRegistry openStatementsRegistry = new DrillStatementRegistry();
   final DrillConnectionConfig config;
diff --git a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
index 86b0b27..68c944a 100644
--- a/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
+++ b/exec/jdbc/src/main/java/org/apache/drill/jdbc/impl/DrillCursor.java
@@ -55,17 +55,16 @@
 import org.apache.drill.jdbc.SchemaChangeListener;
 import org.apache.drill.jdbc.SqlTimeoutException;
 import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.drill.shaded.guava.com.google.common.collect.Queues;
 
-
 public class DrillCursor implements Cursor {
 
   ////////////////////////////////////////
   // ResultsListener:
   static class ResultsListener implements UserResultsListener {
-    private static final org.slf4j.Logger logger =
-        org.slf4j.LoggerFactory.getLogger(ResultsListener.class);
+    private static final Logger logger = LoggerFactory.getLogger(ResultsListener.class);
 
     private static volatile int nextInstanceId = 1;