HBASE-23579 Fixed Checkstyle issues

Bind the checkstyle check goal to the validate phase in the root pom and
fail the build on violations there, instead of carrying per-module
failOnViolation overrides: drop that configuration from hbase-spark and
hbase-spark-it and declare the plugin for hbase-kafka-proxy so it picks
up the shared settings. Also clean up the remaining violations in the
touched Java sources (unused and misordered imports, array declaration
style, indentation, line length, commented-out code).

Signed-off-by: Peter Somogyi <psomogyi@apache.org>
Signed-off-by: Xu Cang <xucang@apache.org>
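
With the check goal bound to the validate phase and failOnViolation set
to true in the root pom, checkstyle now runs during any regular build of
the modules that declare the plugin. A minimal way to exercise it
locally, assuming a standard Maven setup and that the module is part of
the root reactor:

    # run the whole validate phase (checkstyle included) from the root
    mvn validate

    # or run only the plugin goal against a single module
    mvn -pl kafka/hbase-kafka-proxy checkstyle:check
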
diff --git a/kafka/hbase-kafka-proxy/pom.xml b/kafka/hbase-kafka-proxy/pom.xml
index 3f5e287..04c6877 100755
--- a/kafka/hbase-kafka-proxy/pom.xml
+++ b/kafka/hbase-kafka-proxy/pom.xml
@@ -57,6 +57,10 @@
           <skipAssembly>true</skipAssembly>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+      </plugin>
     </plugins>
   </build>
   <dependencies>
diff --git a/kafka/hbase-kafka-proxy/src/main/java/org/apache/hadoop/hbase/kafka/KafkaProxy.java b/kafka/hbase-kafka-proxy/src/main/java/org/apache/hadoop/hbase/kafka/KafkaProxy.java
index 4637bd0..16bf544 100755
--- a/kafka/hbase-kafka-proxy/src/main/java/org/apache/hadoop/hbase/kafka/KafkaProxy.java
+++ b/kafka/hbase-kafka-proxy/src/main/java/org/apache/hadoop/hbase/kafka/KafkaProxy.java
@@ -159,9 +159,7 @@
     commandLineConf.clear();
 
     GenericOptionsParser parser = new GenericOptionsParser(commandLineConf, args);
-    String restArgs[] =parser.getRemainingArgs();
-
-
+    String[] restArgs = parser.getRemainingArgs();
 
     try {
       commandLine = new BasicParser().parse(options, restArgs);
@@ -279,10 +277,8 @@
     byte []uuidBytes = Bytes.toBytes(newValue);
     String idPath=rootZnode+"/hbaseid";
     if (zk.checkExists().forPath(idPath) == null) {
-     // zk.create().creatingParentsIfNeeded().forPath(rootZnode +
-     //     "/hbaseid",uuidBytes);
-        zk.create().forPath(rootZnode);
-        zk.create().forPath(rootZnode +"/hbaseid",uuidBytes);
+      zk.create().forPath(rootZnode);
+      zk.create().forPath(rootZnode + "/hbaseid", uuidBytes);
     } else {
       // If the znode is there already make sure it has the
       // expected value for the peer name.
@@ -340,14 +336,14 @@
 
         if (peerThere) {
           if (enablePeer){
-            LOG.info("enable peer," + peerName);
-              List<ReplicationPeerDescription> peers = admin.listReplicationPeers().stream()
-                      .filter((peer)->peer.getPeerId().equals(peerName))
-                      .filter((peer)->peer.isEnabled()==false)
-                      .collect(Collectors.toList());
-              if (!peers.isEmpty()){
-                admin.enableReplicationPeer(peerName);
-              }
+            LOG.info("enable peer, {}", peerName);
+            List<ReplicationPeerDescription> peers = admin.listReplicationPeers().stream()
+                    .filter(peer -> peer.getPeerId().equals(peerName))
+                    .filter(peer -> !peer.isEnabled())
+                    .collect(Collectors.toList());
+            if (!peers.isEmpty()) {
+              admin.enableReplicationPeer(peerName);
+            }
           }
           break;
         } else {
diff --git a/kafka/hbase-kafka-proxy/src/test/java/org/apache/hadoop/hbase/kafka/TestQualifierMatching.java b/kafka/hbase-kafka-proxy/src/test/java/org/apache/hadoop/hbase/kafka/TestQualifierMatching.java
index dc9ec61..c965f12 100644
--- a/kafka/hbase-kafka-proxy/src/test/java/org/apache/hadoop/hbase/kafka/TestQualifierMatching.java
+++ b/kafka/hbase-kafka-proxy/src/test/java/org/apache/hadoop/hbase/kafka/TestQualifierMatching.java
@@ -14,13 +14,13 @@
  */
 package org.apache.hadoop.hbase.kafka;
 
+import java.nio.charset.StandardCharsets;
+
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import java.nio.charset.StandardCharsets;
-
 /**
  * Make sure match rules work
  */
diff --git a/pom.xml b/pom.xml
index a0244db..91600b5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -418,10 +418,20 @@
               <version>${checkstyle.version}</version>
             </dependency>
           </dependencies>
+          <executions>
+            <execution>
+              <id>checkstyle</id>
+              <phase>validate</phase>
+              <goals>
+                <goal>check</goal>
+              </goals>
+            </execution>
+          </executions>
           <configuration>
             <configLocation>hbase/checkstyle.xml</configLocation>
             <suppressionsLocation>hbase/checkstyle-suppressions.xml</suppressionsLocation>
             <includeTestSourceDirectory>true</includeTestSourceDirectory>
+            <failOnViolation>true</failOnViolation>
           </configuration>
         </plugin>
       </plugins>
diff --git a/spark/hbase-spark-it/pom.xml b/spark/hbase-spark-it/pom.xml
index c367f95..06944eb 100644
--- a/spark/hbase-spark-it/pom.xml
+++ b/spark/hbase-spark-it/pom.xml
@@ -160,9 +160,6 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-checkstyle-plugin</artifactId>
-        <configuration>
-          <failOnViolation>true</failOnViolation>
-        </configuration>
       </plugin>
       <plugin>
         <groupId>net.revelc.code</groupId>
diff --git a/spark/hbase-spark/pom.xml b/spark/hbase-spark/pom.xml
index 0db9245..8dc812f 100644
--- a/spark/hbase-spark/pom.xml
+++ b/spark/hbase-spark/pom.xml
@@ -234,9 +234,6 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-checkstyle-plugin</artifactId>
-        <configuration>
-          <failOnViolation>true</failOnViolation>
-        </configuration>
       </plugin>
       <plugin>
         <groupId>net.revelc.code</groupId>
diff --git a/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java b/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
index 57f9163..c74db6b 100644
--- a/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
+++ b/spark/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/SparkSQLPushDownFilter.java
@@ -14,7 +14,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.spark;
 
 import java.io.IOException;
@@ -32,17 +31,16 @@
 import org.apache.hadoop.hbase.spark.datasources.Field;
 import org.apache.hadoop.hbase.spark.datasources.JavaBytesEncoder;
 import org.apache.hadoop.hbase.spark.protobuf.generated.SparkFilterProtos;
-import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import scala.collection.mutable.MutableList;
+
 import org.apache.hbase.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
 
-import scala.collection.mutable.MutableList;
-
 /**
  * This filter will push down all qualifier logic given to us
  * by SparkSQL so that we have make the filters at the region server level
diff --git a/spark/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java b/spark/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java
index 865a3a3..61ada1d 100644
--- a/spark/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java
+++ b/spark/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.spark;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
@@ -63,8 +62,6 @@
 import org.slf4j.LoggerFactory;
 import scala.Tuple2;
 
-import org.apache.hbase.thirdparty.com.google.common.io.Files;
-
 @Category({MiscTests.class, MediumTests.class})
 public class TestJavaHBaseContext implements Serializable {
 
@@ -133,7 +130,7 @@
 
   @After
   public void tearDown() throws Exception {
-      TEST_UTIL.deleteTable(TableName.valueOf(tableName));
+    TEST_UTIL.deleteTable(TableName.valueOf(tableName));
   }
 
   @Test
@@ -384,8 +381,8 @@
 
     Configuration conf = TEST_UTIL.getConfiguration();
 
-    HBASE_CONTEXT.bulkLoadThinRows(rdd, TableName.valueOf(tableName), new BulkLoadThinRowsFunction(),
-            output.toString(), new HashMap<byte[], FamilyHFileWriteOptions>(), false,
+    HBASE_CONTEXT.bulkLoadThinRows(rdd, TableName.valueOf(tableName),
+            new BulkLoadThinRowsFunction(), output.toString(), new HashMap<>(), false,
             HConstants.DEFAULT_MAX_FILE_SIZE);
 
 
@@ -524,5 +521,4 @@
       table.put(puts);
     }
   }
-
 }