Merge pull request #3101 from frison/STORM-1515

STORM-1515: Fix LocalState Corruption

diff --git a/.travis.yml b/.travis.yml
index 1a79f48..d35d5e3 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -19,6 +19,7 @@
   - MODULES=Core
   - MODULES=External
   - MODULES=Integration-Test
+  - MODULES=Check-Updated-License-Files
 
 dist: trusty
 sudo: required
@@ -39,6 +40,12 @@
   - sudo apt-get update
   - sudo apt-get install python3
   - sudo apt-get install python3-pip
+  - sudo add-apt-repository ppa:deadsnakes/ppa -y
+  - sudo apt-get update
+  - sudo apt-get install python3.6
+  - wget http://mirrors.rackhosting.com/apache/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.tar.gz -P $HOME
+  - tar xzvf $HOME/apache-maven-3.6.1-bin.tar.gz -C $HOME
+  - export PATH=$HOME/apache-maven-3.6.1/bin:$PATH
 install: /bin/bash ./dev-tools/travis/travis-install.sh `pwd`
 script:
   - /bin/bash ./dev-tools/travis/travis-script.sh `pwd` $MODULES
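
A rough sketch of what the new Check-Updated-License-Files step could boil down to (the real logic lives in dev-tools/travis/travis-script.sh and may differ; the Maven goal below is an assumption for illustration, not taken from this diff):

    #!/usr/bin/env bash
    # Hypothetical license-drift check -- illustrative only; the actual
    # implementation is in dev-tools/travis/travis-script.sh.
    set -euo pipefail

    # Regenerate the aggregated third-party report (assumes the build is
    # wired to org.codehaus.mojo:license-maven-plugin or similar).
    mvn -B license:aggregate-add-third-party

    # Fail CI if the committed DEPENDENCY-LICENSES no longer matches what
    # the current dependency tree produces.
    git diff --exit-code -- DEPENDENCY-LICENSES

Run against this PR, such a check passes only because the DEPENDENCY-LICENSES hunks below were regenerated in the same change.
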
diff --git a/DEPENDENCY-LICENSES b/DEPENDENCY-LICENSES
index 16cc35a..e0344fc 100644
--- a/DEPENDENCY-LICENSES
+++ b/DEPENDENCY-LICENSES
@@ -5,11 +5,7 @@
     Apache License
 
         * HttpClient (commons-httpclient:commons-httpclient:3.0.1 - http://jakarta.apache.org/commons/httpclient/)
-        * HttpClient (commons-httpclient:commons-httpclient:3.1 - http://jakarta.apache.org/httpcomponents/httpclient-3.x/)
-        * HttpClient (org.apache.httpcomponents:httpclient:4.2.5 - http://hc.apache.org/httpcomponents-client)
-        * HttpCore (org.apache.httpcomponents:httpcore:4.2.4 - http://hc.apache.org/httpcomponents-core-ga)
-        * Log4j Implemented Over SLF4J (org.slf4j:log4j-over-slf4j:1.6.6 - http://www.slf4j.org)
-        * Log4j Implemented Over SLF4J (org.slf4j:log4j-over-slf4j:1.7.25 - http://www.slf4j.org)
+        * Log4j Implemented Over SLF4J (org.slf4j:log4j-over-slf4j:1.7.26 - http://www.slf4j.org)
 
     Apache License, Version 2.0
 
@@ -37,19 +33,14 @@
         * Apache Calcite Avatica (org.apache.calcite.avatica:avatica-core:1.10.0 - https://calcite.apache.org/avatica/avatica-core)
         * Apache Calcite Avatica Metrics (org.apache.calcite.avatica:avatica-metrics:1.10.0 - https://calcite.apache.org/avatica/avatica-metrics)
         * Apache Calcite Avatica Metrics (org.apache.calcite.avatica:avatica-metrics:1.8.0 - http://calcite.apache.org/avatica/avatica-metrics)
-        * Apache Commons CLI (commons-cli:commons-cli:1.3.1 - http://commons.apache.org/proper/commons-cli/)
         * Apache Commons CLI (commons-cli:commons-cli:1.4 - http://commons.apache.org/proper/commons-cli/)
-        * Apache Commons Codec (commons-codec:commons-codec:1.10 - http://commons.apache.org/proper/commons-codec/)
         * Apache Commons Codec (commons-codec:commons-codec:1.11 - http://commons.apache.org/proper/commons-codec/)
-        * Apache Commons Codec (commons-codec:commons-codec:1.9 - http://commons.apache.org/proper/commons-codec/)
         * Apache Commons Collections (commons-collections:commons-collections:3.2.2 - http://commons.apache.org/collections/)
         * Apache Commons Compress (org.apache.commons:commons-compress:1.18 - https://commons.apache.org/proper/commons-compress/)
-        * Apache Commons Compress (org.apache.commons:commons-compress:1.9 - http://commons.apache.org/proper/commons-compress/)
         * Apache Commons Crypto (org.apache.commons:commons-crypto:1.0.0 - http://commons.apache.org/proper/commons-crypto/)
         * Apache Commons CSV (org.apache.commons:commons-csv:1.4 - http://commons.apache.org/proper/commons-csv/)
         * Apache Commons Exec (org.apache.commons:commons-exec:1.3 - http://commons.apache.org/proper/commons-exec/)
         * Apache Commons FileUpload (commons-fileupload:commons-fileupload:1.3.3 - http://commons.apache.org/proper/commons-fileupload/)
-        * Apache Commons IO (commons-io:commons-io:2.5 - http://commons.apache.org/proper/commons-io/)
         * Apache Commons IO (commons-io:commons-io:2.6 - http://commons.apache.org/proper/commons-io/)
         * Apache Commons Lang (org.apache.commons:commons-lang3:3.2 - http://commons.apache.org/proper/commons-lang/)
         * Apache Commons Lang (org.apache.commons:commons-lang3:3.3 - http://commons.apache.org/proper/commons-lang/)
@@ -69,16 +60,11 @@
         * ApacheDS I18n (org.apache.directory.server:apacheds-i18n:2.0.0-M15 - http://directory.apache.org/apacheds/1.5/apacheds-i18n)
         * ApacheDS Protocol Kerberos Codec (org.apache.directory.server:apacheds-kerberos-codec:2.0.0-M15 - http://directory.apache.org/apacheds/1.5/apacheds-kerberos-codec)
         * Apache Groovy (org.codehaus.groovy:groovy-all:2.4.4 - http://groovy-lang.org)
-        * Apache Hadoop Annotations (org.apache.hadoop:hadoop-annotations:2.7.7 - no url defined)
         * Apache Hadoop Annotations (org.apache.hadoop:hadoop-annotations:2.8.5 - no url defined)
-        * Apache Hadoop Archives (org.apache.hadoop:hadoop-archives:2.7.2 - no url defined)
-        * Apache Hadoop Auth (org.apache.hadoop:hadoop-auth:2.7.7 - no url defined)
+        * Apache Hadoop Archives (org.apache.hadoop:hadoop-archives:2.8.5 - no url defined)
         * Apache Hadoop Auth (org.apache.hadoop:hadoop-auth:2.8.5 - no url defined)
         * Apache Hadoop Client (org.apache.hadoop:hadoop-client:2.8.5 - no url defined)
-        * Apache Hadoop Common (org.apache.hadoop:hadoop-common:2.7.7 - no url defined)
         * Apache Hadoop Common (org.apache.hadoop:hadoop-common:2.8.5 - no url defined)
-        * Apache Hadoop HDFS (org.apache.hadoop:hadoop-hdfs:2.7.2 - no url defined)
-        * Apache Hadoop HDFS (org.apache.hadoop:hadoop-hdfs:2.7.7 - no url defined)
         * Apache Hadoop HDFS (org.apache.hadoop:hadoop-hdfs:2.8.5 - no url defined)
         * Apache Hadoop HDFS Client (org.apache.hadoop:hadoop-hdfs-client:2.8.5 - no url defined)
         * Apache Hadoop MapReduce App (org.apache.hadoop:hadoop-mapreduce-client-app:2.8.5 - no url defined)
@@ -87,9 +73,14 @@
         * Apache Hadoop MapReduce JobClient (org.apache.hadoop:hadoop-mapreduce-client-jobclient:2.8.5 - no url defined)
         * Apache Hadoop MapReduce Shuffle (org.apache.hadoop:hadoop-mapreduce-client-shuffle:2.8.5 - no url defined)
         * Apache Hadoop YARN API (org.apache.hadoop:hadoop-yarn-api:2.8.5 - no url defined)
+        * Apache Hadoop YARN ApplicationHistoryService (org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice:2.8.5 - no url defined)
         * Apache Hadoop YARN Client (org.apache.hadoop:hadoop-yarn-client:2.8.5 - no url defined)
         * Apache Hadoop YARN Common (org.apache.hadoop:hadoop-yarn-common:2.8.5 - no url defined)
+        * Apache Hadoop YARN NodeManager (org.apache.hadoop:hadoop-yarn-server-nodemanager:2.8.5 - no url defined)
+        * Apache Hadoop YARN Registry (org.apache.hadoop:hadoop-yarn-registry:2.8.5 - no url defined)
+        * Apache Hadoop YARN ResourceManager (org.apache.hadoop:hadoop-yarn-server-resourcemanager:2.8.5 - no url defined)
         * Apache Hadoop YARN Server Common (org.apache.hadoop:hadoop-yarn-server-common:2.8.5 - no url defined)
+        * Apache Hadoop YARN Web Proxy (org.apache.hadoop:hadoop-yarn-server-web-proxy:2.8.5 - no url defined)
         * Apache HBase - Annotations (org.apache.hbase:hbase-annotations:2.1.3 - http://hbase.apache.org/hbase-annotations)
         * Apache HBase - Client (org.apache.hbase:hbase-client:2.1.3 - http://hbase.apache.org/hbase-build-configuration/hbase-client)
         * Apache HBase - Common (org.apache.hbase:hbase-common:2.1.3 - http://hbase.apache.org/hbase-build-configuration/hbase-common)
@@ -108,28 +99,22 @@
         * Apache HBase - Shaded Protocol (org.apache.hbase:hbase-protocol-shaded:2.1.3 - http://hbase.apache.org/hbase-build-configuration/hbase-protocol-shaded)
         * Apache HBase - Zookeeper (org.apache.hbase:hbase-zookeeper:2.1.3 - http://hbase.apache.org/hbase-build-configuration/hbase-zookeeper)
         * Apache HttpAsyncClient (org.apache.httpcomponents:httpasyncclient:4.1.2 - http://hc.apache.org/httpcomponents-asyncclient)
-        * Apache HttpClient (org.apache.httpcomponents:httpclient:4.3.6 - http://hc.apache.org/httpcomponents-client)
-        * Apache HttpClient (org.apache.httpcomponents:httpclient:4.4.1 - http://hc.apache.org/httpcomponents-client)
         * Apache HttpClient (org.apache.httpcomponents:httpclient:4.5.2 - http://hc.apache.org/httpcomponents-client)
         * Apache HttpClient (org.apache.httpcomponents:httpclient:4.5.6 - http://hc.apache.org/httpcomponents-client)
         * Apache HttpClient (org.apache.httpcomponents:httpclient:4.5 - http://hc.apache.org/httpcomponents-client)
         * Apache HttpClient Mime (org.apache.httpcomponents:httpmime:4.4.1 - http://hc.apache.org/httpcomponents-client)
-        * Apache HttpCore (org.apache.httpcomponents:httpcore:4.3.3 - http://hc.apache.org/httpcomponents-core-ga)
         * Apache HttpCore (org.apache.httpcomponents:httpcore:4.4.10 - http://hc.apache.org/httpcomponents-core-ga)
         * Apache HttpCore (org.apache.httpcomponents:httpcore:4.4.1 - http://hc.apache.org/httpcomponents-core-ga)
-        * Apache HttpCore (org.apache.httpcomponents:httpcore:4.4.4 - http://hc.apache.org/httpcomponents-core-ga)
         * Apache HttpCore (org.apache.httpcomponents:httpcore:4.4.5 - http://hc.apache.org/httpcomponents-core-ga)
         * Apache HttpCore NIO (org.apache.httpcomponents:httpcore-nio:4.4.5 - http://hc.apache.org/httpcomponents-core-ga)
         * Apache Ivy (org.apache.ivy:ivy:2.4.0 - http://ant.apache.org/ivy/)
         * Apache Kafka (org.apache.kafka:kafka-clients:0.11.0.3 - http://kafka.apache.org)
         * Apache Log4j (log4j:log4j:1.2.17 - http://logging.apache.org/log4j/1.2/)
-        * Apache Log4j 1.x Compatibility API (org.apache.logging.log4j:log4j-1.2-api:2.6.2 - http://logging.apache.org/log4j/2.x/log4j-1.2-api/)
-        * Apache Log4j API (org.apache.logging.log4j:log4j-api:2.11.1 - https://logging.apache.org/log4j/2.x/log4j-api/)
-        * Apache Log4j API (org.apache.logging.log4j:log4j-api:2.6.2 - http://logging.apache.org/log4j/2.x/log4j-api/)
-        * Apache Log4j Core (org.apache.logging.log4j:log4j-core:2.11.1 - https://logging.apache.org/log4j/2.x/log4j-core/)
-        * Apache Log4j Core (org.apache.logging.log4j:log4j-core:2.6.2 - http://logging.apache.org/log4j/2.x/log4j-core/)
-        * Apache Log4j SLF4J Binding (org.apache.logging.log4j:log4j-slf4j-impl:2.11.1 - https://logging.apache.org/log4j/2.x/log4j-slf4j-impl/)
-        * Apache Log4j Web (org.apache.logging.log4j:log4j-web:2.6.2 - http://logging.apache.org/log4j/2.x/log4j-web/)
+        * Apache Log4j 1.x Compatibility API (org.apache.logging.log4j:log4j-1.2-api:2.11.2 - https://logging.apache.org/log4j/2.x/log4j-1.2-api/)
+        * Apache Log4j API (org.apache.logging.log4j:log4j-api:2.11.2 - https://logging.apache.org/log4j/2.x/log4j-api/)
+        * Apache Log4j Core (org.apache.logging.log4j:log4j-core:2.11.2 - https://logging.apache.org/log4j/2.x/log4j-core/)
+        * Apache Log4j SLF4J Binding (org.apache.logging.log4j:log4j-slf4j-impl:2.11.2 - https://logging.apache.org/log4j/2.x/log4j-slf4j-impl/)
+        * Apache Log4j Web (org.apache.logging.log4j:log4j-web:2.11.2 - https://logging.apache.org/log4j/2.x/log4j-web/)
         * Apache Maven Artifact Transfer (org.apache.maven.shared:maven-artifact-transfer:0.9.1 - https://maven.apache.org/shared/maven-artifact-transfer/)
         * Apache Maven Common Artifact Filters (org.apache.maven.shared:maven-common-artifact-filters:3.0.1 - https://maven.apache.org/shared/maven-common-artifact-filters/)
         * Apache Maven Dependency Tree (org.apache.maven.shared:maven-dependency-tree:2.2 - http://maven.apache.org/shared/maven-dependency-tree/)
@@ -139,6 +124,7 @@
         * Apache Parquet Hadoop Bundle (org.apache.parquet:parquet-hadoop-bundle:1.8.1 - https://parquet.apache.org)
         * Apache Solr Solrj (org.apache.solr:solr-solrj:5.5.5 - http://lucene.apache.org/solr-parent/solr-solrj)
         * Apache Thrift (org.apache.thrift:libfb303:0.9.3 - http://thrift.apache.org)
+        * Apache Thrift (org.apache.thrift:libthrift:0.12.0 - http://thrift.apache.org)
         * Apache Thrift (org.apache.thrift:libthrift:0.9.3 - http://thrift.apache.org)
         * Apache Twill API (org.apache.twill:twill-api:0.6.0-incubating - http://twill.incubator.apache.org/twill-api)
         * Apache Twill common library (org.apache.twill:twill-common:0.6.0-incubating - http://twill.incubator.apache.org/twill-common)
@@ -151,6 +137,7 @@
         * Apache Yetus - Audience Annotations (org.apache.yetus:audience-annotations:0.5.0 - https://yetus.apache.org/audience-annotations)
         * ASM based accessors helper used by json-smart (net.minidev:accessors-smart:1.2 - http://www.minidev.net/)
         * Auto Common Libraries (com.google.auto:auto-common:0.8 - https://github.com/google/auto/auto-common)
+        * AutoService (com.google.auto.service:auto-service:1.0-rc4 - https://github.com/google/auto/auto-service)
         * AWS Java SDK for Amazon API Gateway (com.amazonaws:aws-java-sdk-api-gateway:1.10.77 - https://aws.amazon.com/sdkforjava)
         * AWS Java SDK for Amazon CloudFront (com.amazonaws:aws-java-sdk-cloudfront:1.10.77 - https://aws.amazon.com/sdkforjava)
         * AWS Java SDK for Amazon CloudSearch (com.amazonaws:aws-java-sdk-cloudsearch:1.10.77 - https://aws.amazon.com/sdkforjava)
@@ -215,7 +202,7 @@
         * AWS Java SDK for the AWS Simple Systems Management (SSM) Service (com.amazonaws:aws-java-sdk-ssm:1.10.77 - https://aws.amazon.com/sdkforjava)
         * AWS SDK For Java (com.amazonaws:aws-java-sdk:1.10.77 - https://aws.amazon.com/sdkforjava)
         * AWS SDK for Java - Core (com.amazonaws:aws-java-sdk-core:1.10.77 - https://aws.amazon.com/sdkforjava)
-        * Bean Validation API (javax.validation:validation-api:1.1.0.Final - http://beanvalidation.org)
+        * Bean Validation API (javax.validation:validation-api:2.0.1.Final - http://beanvalidation.org)
         * BoneCP :: Core Library (com.jolbox:bonecp:0.8.0.RELEASE - http://jolbox.com/bonecp)
         * Caffeine cache (com.github.ben-manes.caffeine:caffeine:2.3.5 - https://github.com/ben-manes/caffeine)
         * Calcite Core (org.apache.calcite:calcite-core:1.14.0 - https://calcite.apache.org/calcite-core)
@@ -232,28 +219,21 @@
         * commons-beanutils (commons-beanutils:commons-beanutils:1.7.0 - no url defined)
         * Commons BeanUtils Core (commons-beanutils:commons-beanutils-core:1.8.0 - http://commons.apache.org/beanutils/)
         * Commons CLI (commons-cli:commons-cli:1.2 - http://commons.apache.org/cli/)
-        * Commons Codec (commons-codec:commons-codec:1.4 - http://commons.apache.org/codec/)
-        * Commons Codec (commons-codec:commons-codec:1.6 - http://commons.apache.org/codec/)
-        * Commons Compress (org.apache.commons:commons-compress:1.4.1 - http://commons.apache.org/compress/)
         * Commons Configuration (commons-configuration:commons-configuration:1.6 - http://commons.apache.org/${pom.artifactId.substring(8)}/)
         * Commons Daemon (commons-daemon:commons-daemon:1.0.13 - http://commons.apache.org/daemon/)
         * Commons DBCP (commons-dbcp:commons-dbcp:1.4 - http://commons.apache.org/dbcp/)
         * Commons IO (commons-io:commons-io:1.4 - http://commons.apache.org/io/)
-        * Commons IO (commons-io:commons-io:2.4 - http://commons.apache.org/io/)
         * Commons Lang (commons-lang:commons-lang:2.5 - http://commons.apache.org/lang/)
         * Commons Lang (commons-lang:commons-lang:2.6 - http://commons.apache.org/lang/)
         * Commons Logging (commons-logging:commons-logging:1.1.3 - http://commons.apache.org/proper/commons-logging/)
-        * Commons Math (org.apache.commons:commons-math:2.2 - http://commons.apache.org/math/)
         * Commons Math (org.apache.commons:commons-math3:3.1.1 - http://commons.apache.org/math/)
         * Commons Math (org.apache.commons:commons-math3:3.2 - http://commons.apache.org/proper/commons-math/)
         * Commons Net (commons-net:commons-net:3.1 - http://commons.apache.org/net/)
         * Commons Pool (commons-pool:commons-pool:1.5.4 - http://commons.apache.org/pool/)
         * Compress-LZF (com.ning:compress-lzf:1.0.2 - http://github.com/ning/compress)
-        * Curator Client (org.apache.curator:curator-client:2.7.1 - http://curator.apache.org/curator-client)
         * Curator Client (org.apache.curator:curator-client:4.2.0 - http://curator.apache.org/curator-client)
-        * Curator Framework (org.apache.curator:curator-framework:2.7.1 - http://curator.apache.org/curator-framework)
         * Curator Framework (org.apache.curator:curator-framework:4.2.0 - http://curator.apache.org/curator-framework)
-        * Curator Recipes (org.apache.curator:curator-recipes:2.7.1 - http://curator.apache.org/curator-recipes)
+        * Curator Recipes (org.apache.curator:curator-recipes:4.2.0 - http://curator.apache.org/curator-recipes)
         * Data Mapper for Jackson (org.codehaus.jackson:jackson-mapper-asl:1.9.13 - http://jackson.codehaus.org)
         * DataNucleus Core (org.datanucleus:datanucleus-core:4.1.17 - http://www.datanucleus.org/#/datanucleus-core)
         * DataNucleus JDO API plugin (org.datanucleus:datanucleus-api-jdo:4.2.4 - http://www.datanucleus.org/#/datanucleus-api-jdo)
@@ -261,7 +241,6 @@
         * DataStax Java Driver for Apache Cassandra - Core (com.datastax.cassandra:cassandra-driver-core:3.1.2 - https://github.com/datastax/java-driver)
         * DataStax Java Driver for Apache Cassandra - Object Mapping (com.datastax.cassandra:cassandra-driver-mapping:3.1.2 - https://github.com/datastax/java-driver)
         * Digester (commons-digester:commons-digester:1.8 - http://jakarta.apache.org/commons/digester/)
-        * Disruptor Framework (com.lmax:disruptor:3.3.0 - http://lmax-exchange.github.com/disruptor)
         * Disruptor Framework (com.lmax:disruptor:3.3.6 - http://lmax-exchange.github.com/disruptor)
         * Dropwizard (io.dropwizard:dropwizard-core:1.3.5 - http://www.dropwizard.io/1.3.5/dropwizard-core)
         * Dropwizard Asset Bundle (io.dropwizard:dropwizard-assets:1.3.5 - http://www.dropwizard.io/1.3.5/dropwizard-assets)
@@ -279,20 +258,14 @@
         * EL (commons-el:commons-el:1.0 - http://jakarta.apache.org/commons/el/)
         * Elasticsearch: Core (org.elasticsearch:elasticsearch:2.4.4 - http://nexus.sonatype.org/oss-repository-hosting.html/parent/elasticsearch)
         * Elasticsearch SecureSM (org.elasticsearch:securesm:1.0 - http://nexus.sonatype.org/oss-repository-hosting.html/securesm)
-        * error-prone annotations (com.google.errorprone:error_prone_annotations:2.0.18 - http://nexus.sonatype.org/oss-repository-hosting.html/error_prone_parent/error_prone_annotations)
-        * error-prone annotations (com.google.errorprone:error_prone_annotations:2.1.3 - http://nexus.sonatype.org/oss-repository-hosting.html/error_prone_parent/error_prone_annotations)
         * error-prone annotations (com.google.errorprone:error_prone_annotations:2.2.0 - http://nexus.sonatype.org/oss-repository-hosting.html/error_prone_parent/error_prone_annotations)
         * Esri Geometry API for Java (com.esri.geometry:esri-geometry-api:2.0.0 - https://github.com/Esri/geometry-api-java)
         * fastjson (com.alibaba:fastjson:1.2.29 - https://github.com/alibaba/fastjson)
         * fastutil (it.unimi.dsi:fastutil:6.5.6 - http://fasutil.dsi.unimi.it/)
         * Findbugs Annotations under Apache License (com.github.stephenc.findbugs:findbugs-annotations:1.3.9-1 - http://stephenc.github.com/findbugs-annotations)
-        * FindBugs-jsr305 (com.google.code.findbugs:jsr305:1.3.9 - http://findbugs.sourceforge.net/)
         * FindBugs-jsr305 (com.google.code.findbugs:jsr305:3.0.0 - http://findbugs.sourceforge.net/)
         * FindBugs-jsr305 (com.google.code.findbugs:jsr305:3.0.1 - http://findbugs.sourceforge.net/)
         * FindBugs-jsr305 (com.google.code.findbugs:jsr305:3.0.2 - http://findbugs.sourceforge.net/)
-        * flux-core (org.apache.storm:flux-core:2.0.1-SNAPSHOT - https://storm.apache.org/flux/flux-core)
-        * flux-wrappers (org.apache.storm:flux-wrappers:2.0.1-SNAPSHOT - https://storm.apache.org/flux/flux-wrappers)
-        * Glassfish Jasper API (org.mortbay.jetty:jsp-api-2.1:6.1.14 - http://jetty.mortbay.org/project/modules/jsp-api-2.1)
         * Google Guice - Core Library (com.google.inject:guice:3.0 - http://code.google.com/p/google-guice/guice/)
         * Google Guice - Core Library (com.google.inject:guice:4.2.1 - https://github.com/google/guice/guice)
         * Google Guice - Extensions - AssistedInject (com.google.inject.extensions:guice-assistedinject:3.0 - http://code.google.com/p/google-guice/extensions-parent/guice-assistedinject/)
@@ -301,37 +274,18 @@
         * Gson (com.google.code.gson:gson:2.2.4 - http://code.google.com/p/google-gson/)
         * Gson (com.google.code.gson:gson:2.3.1 - http://code.google.com/p/google-gson/)
         * Gson (com.google.code.gson:gson:2.8.0 - https://github.com/google/gson/gson)
-        * Guava: Google Core Libraries for Java (com.google.guava:guava:11.0.2 - http://code.google.com/p/guava-libraries/guava)
-        * Guava: Google Core Libraries for Java (com.google.guava:guava:14.0.1 - http://code.google.com/p/guava-libraries/guava)
         * Guava: Google Core Libraries for Java (com.google.guava:guava:16.0.1 - http://code.google.com/p/guava-libraries/guava)
         * Guava: Google Core Libraries for Java (com.google.guava:guava:17.0 - http://code.google.com/p/guava-libraries/guava)
-        * Guava: Google Core Libraries for Java (com.google.guava:guava:18.0 - http://code.google.com/p/guava-libraries/guava)
-        * Guava: Google Core Libraries for Java (com.google.guava:guava:19.0 - https://github.com/google/guava/guava)
-        * Guava: Google Core Libraries for Java (com.google.guava:guava:23.5-jre - https://github.com/google/guava/guava)
-        * Guava: Google Core Libraries for Java (com.google.guava:guava:25.1-android - https://github.com/google/guava/guava)
         * Guava: Google Core Libraries for Java (com.google.guava:guava:27.0.1-jre - https://github.com/google/guava/guava)
         * Guava InternalFutureFailureAccess and InternalFutures (com.google.guava:failureaccess:1.0.1 - https://github.com/google/guava/failureaccess)
         * Guava ListenableFuture only (com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava - https://github.com/google/guava/listenablefuture)
         * Hadoop Metrics2 Reporter for Dropwizard Metrics (com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter:0.1.2 - https://github.com/joshelser/dropwizard-hadoop-metrics2)
-        * hadoop-yarn-registry (org.apache.hadoop:hadoop-yarn-registry:2.7.1 - no url defined)
-        * hadoop-yarn-server-applicationhistoryservice (org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice:2.7.2 - no url defined)
-        * hadoop-yarn-server-resourcemanager (org.apache.hadoop:hadoop-yarn-server-resourcemanager:2.7.2 - no url defined)
-        * hadoop-yarn-server-web-proxy (org.apache.hadoop:hadoop-yarn-server-web-proxy:2.7.2 - no url defined)
         * hawtbuf (org.fusesource.hawtbuf:hawtbuf:1.10 - http://hawtbuf.fusesource.org/hawtbuf)
         * hawtbuf (org.fusesource.hawtbuf:hawtbuf:1.11 - http://hawtbuf.fusesource.org/hawtbuf)
-        * hawtdispatch (org.fusesource.hawtdispatch:hawtdispatch:1.20 - http://hawtdispatch.fusesource.org/hawtdispatch/)
         * hawtdispatch (org.fusesource.hawtdispatch:hawtdispatch:1.22 - http://hawtdispatch.fusesource.org/hawtdispatch/)
         * hawtdispatch-transport (org.fusesource.hawtdispatch:hawtdispatch-transport:1.22 - http://hawtdispatch.fusesource.org/hawtdispatch-transport/)
-        * HBase - Annotations (org.apache.hbase:hbase-annotations:1.1.1 - http://hbase.apache.org/hbase-annotations)
-        * HBase - Client (org.apache.hbase:hbase-client:1.1.1 - http://hbase.apache.org/hbase-client)
-        * HBase - Common (org.apache.hbase:hbase-common:1.1.1 - http://hbase.apache.org/hbase-common)
-        * HBase - Hadoop Compatibility (org.apache.hbase:hbase-hadoop-compat:1.1.1 - http://hbase.apache.org/hbase-hadoop-compat)
-        * HBase - Hadoop Two Compatibility (org.apache.hbase:hbase-hadoop2-compat:1.1.1 - http://hbase.apache.org/hbase-hadoop2-compat)
-        * HBase - Prefix Tree (org.apache.hbase:hbase-prefix-tree:1.1.1 - http://hbase.apache.org/hbase-prefix-tree)
-        * HBase - Procedure (org.apache.hbase:hbase-procedure:1.1.1 - http://hbase.apache.org/hbase-procedure)
-        * HBase - Protocol (org.apache.hbase:hbase-protocol:1.1.1 - http://hbase.apache.org/hbase-protocol)
-        * HBase - Server (org.apache.hbase:hbase-server:1.1.1 - http://hbase.apache.org/hbase-server)
         * Hibernate Validator Engine (org.hibernate:hibernate-validator:5.4.2.Final - http://hibernate.org/validator/hibernate-validator)
+        * Hibernate Validator Engine (org.hibernate.validator:hibernate-validator:6.0.17.Final - http://hibernate.org/validator/hibernate-validator)
         * HikariCP (com.zaxxer:HikariCP:2.4.7 - https://github.com/brettwooldridge/HikariCP)
         * HikariCP (com.zaxxer:HikariCP:2.5.1 - https://github.com/brettwooldridge/HikariCP)
         * Hive CLI (org.apache.hive:hive-cli:2.3.4 - http://hive.apache.org/hive-cli)
@@ -356,49 +310,29 @@
         * Hive Storage API (org.apache.hive:hive-storage-api:2.4.0 - https://www.apache.org/hive-storage-api/)
         * Hive Vector-Code-Gen Utilities (org.apache.hive:hive-vector-code-gen:2.3.4 - http://hive.apache.org/hive-vector-code-gen)
         * HPPC Collections (com.carrotsearch:hppc:0.7.1 - http://labs.carrotsearch.com/hppc.html/hppc)
-        * htrace-core (org.apache.htrace:htrace-core:3.1.0-incubating - http://incubator.apache.org/projects/htrace.html)
         * htrace-core4 (org.apache.htrace:htrace-core4:4.0.1-incubating - http://incubator.apache.org/projects/htrace.html)
         * htrace-core4 (org.apache.htrace:htrace-core4:4.2.0-incubating - http://incubator.apache.org/projects/htrace.html)
         * J2EE Management 1.1 (org.apache.geronimo.specs:geronimo-j2ee-management_1.1_spec:1.0.1 - http://geronimo.apache.org/specs/geronimo-j2ee-management_1.1_spec)
         * j2html (com.j2html:j2html:1.0.0 - http://j2html.com)
         * J2ObjC Annotations (com.google.j2objc:j2objc-annotations:1.1 - https://github.com/google/j2objc/)
         * Jackson (org.codehaus.jackson:jackson-core-asl:1.9.13 - http://jackson.codehaus.org)
-        * Jackson-annotations (com.fasterxml.jackson.core:jackson-annotations:2.4.0 - http://wiki.fasterxml.com/JacksonHome)
-        * Jackson-annotations (com.fasterxml.jackson.core:jackson-annotations:2.5.0 - http://github.com/FasterXML/jackson)
-        * Jackson-annotations (com.fasterxml.jackson.core:jackson-annotations:2.6.3 - http://github.com/FasterXML/jackson)
         * Jackson-annotations (com.fasterxml.jackson.core:jackson-annotations:2.9.0 - http://github.com/FasterXML/jackson)
-        * Jackson-annotations (com.fasterxml.jackson.core:jackson-annotations:2.9.2 - http://github.com/FasterXML/jackson)
-        * Jackson-core (com.fasterxml.jackson.core:jackson-core:2.4.3 - http://wiki.fasterxml.com/JacksonHome)
-        * Jackson-core (com.fasterxml.jackson.core:jackson-core:2.5.3 - https://github.com/FasterXML/jackson)
-        * Jackson-core (com.fasterxml.jackson.core:jackson-core:2.6.3 - https://github.com/FasterXML/jackson-core)
-        * Jackson-core (com.fasterxml.jackson.core:jackson-core:2.8.1 - https://github.com/FasterXML/jackson-core)
-        * Jackson-core (com.fasterxml.jackson.core:jackson-core:2.9.2 - https://github.com/FasterXML/jackson-core)
-        * Jackson-core (com.fasterxml.jackson.core:jackson-core:2.9.6 - https://github.com/FasterXML/jackson-core)
         * Jackson-core (com.fasterxml.jackson.core:jackson-core:2.9.8 - https://github.com/FasterXML/jackson-core)
-        * jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.4.3 - http://wiki.fasterxml.com/JacksonHome)
-        * jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.5.3 - http://github.com/FasterXML/jackson)
-        * jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.6.3 - http://github.com/FasterXML/jackson)
-        * jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.9.2 - http://github.com/FasterXML/jackson)
-        * jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.9.6 - http://github.com/FasterXML/jackson)
         * jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.9.8 - http://github.com/FasterXML/jackson)
-        * Jackson dataformat: CBOR (com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:2.8.1 - http://github.com/FasterXML/jackson-dataformats-binary)
-        * Jackson dataformat: Smile (com.fasterxml.jackson.dataformat:jackson-dataformat-smile:2.8.1 - http://github.com/FasterXML/jackson-dataformats-binary)
+        * Jackson dataformat: CBOR (com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:2.9.8 - http://github.com/FasterXML/jackson-dataformats-binary)
         * Jackson dataformat: Smile (com.fasterxml.jackson.dataformat:jackson-dataformat-smile:2.9.8 - http://github.com/FasterXML/jackson-dataformats-binary)
-        * Jackson-dataformat-CBOR (com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:2.5.3 - http://wiki.fasterxml.com/JacksonForCbor)
-        * Jackson-dataformat-YAML (com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.8.1 - https://github.com/FasterXML/jackson)
-        * Jackson-dataformat-YAML (com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.9.6 - https://github.com/FasterXML/jackson-dataformats-text)
-        * Jackson datatype: Guava (com.fasterxml.jackson.datatype:jackson-datatype-guava:2.9.6 - https://github.com/FasterXML/jackson-datatypes-collections)
-        * Jackson datatype: jdk8 (com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.9.6 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jdk8)
-        * Jackson datatype: JSR310 (com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.9.6 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jsr310)
-        * Jackson-datatype-Joda (com.fasterxml.jackson.datatype:jackson-datatype-joda:2.9.6 - http://wiki.fasterxml.com/JacksonModuleJoda)
+        * Jackson-dataformat-YAML (com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.9.8 - https://github.com/FasterXML/jackson-dataformats-text)
+        * Jackson datatype: Guava (com.fasterxml.jackson.datatype:jackson-datatype-guava:2.9.8 - https://github.com/FasterXML/jackson-datatypes-collections)
+        * Jackson datatype: jdk8 (com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.9.8 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jdk8)
+        * Jackson datatype: JSR310 (com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.9.8 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jsr310)
+        * Jackson-datatype-Joda (com.fasterxml.jackson.datatype:jackson-datatype-joda:2.9.8 - http://wiki.fasterxml.com/JacksonModuleJoda)
         * Jackson Integration for Metrics (io.dropwizard.metrics:metrics-json:3.1.0 - http://metrics.codahale.com/metrics-json/)
         * Jackson Integration for Metrics (io.dropwizard.metrics:metrics-json:4.0.2 - http://metrics.dropwizard.io/metrics-json)
-        * Jackson-JAXRS-base (com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.9.6 - http://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-base)
-        * Jackson-JAXRS-JSON (com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.9.6 - http://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-json-provider)
-        * Jackson module: Afterburner (com.fasterxml.jackson.module:jackson-module-afterburner:2.9.6 - https://github.com/FasterXML/jackson-modules-base)
-        * Jackson module: JAXB-annotations (com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.8.10 - http://github.com/FasterXML/jackson-module-jaxb-annotations)
-        * Jackson module: JAXB Annotations (com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.9.6 - https://github.com/FasterXML/jackson-modules-base)
-        * Jackson-module-parameter-names (com.fasterxml.jackson.module:jackson-module-parameter-names:2.9.6 - https://github.com/FasterXML/jackson-modules-java8/jackson-module-parameter-names)
+        * Jackson-JAXRS-base (com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.9.8 - http://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-base)
+        * Jackson-JAXRS-JSON (com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.9.8 - http://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-json-provider)
+        * Jackson module: Afterburner (com.fasterxml.jackson.module:jackson-module-afterburner:2.9.8 - https://github.com/FasterXML/jackson-modules-base)
+        * Jackson module: JAXB Annotations (com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.9.8 - https://github.com/FasterXML/jackson-modules-base)
+        * Jackson-module-parameter-names (com.fasterxml.jackson.module:jackson-module-parameter-names:2.9.8 - https://github.com/FasterXML/jackson-modules-java8/jackson-module-parameter-names)
         * jasper-compiler (tomcat:jasper-compiler:5.5.23 - http://tomcat.apache.org/jasper-compiler)
         * jasper-runtime (tomcat:jasper-runtime:5.5.23 - http://tomcat.apache.org/jasper-runtime)
         * Java Authentication SPI for Containers (org.apache.geronimo.specs:geronimo-jaspic_1.0_spec:1.0 - http://geronimo.apache.org/maven/specs/geronimo-jaspic_1.0_spec/1.0)
@@ -408,6 +342,7 @@
         * java-xmlbuilder (com.jamesmurty.utils:java-xmlbuilder:0.4 - http://code.google.com/p/java-xmlbuilder/)
         * JBoss Logging 3 (org.jboss.logging:jboss-logging:3.3.0.Final - http://www.jboss.org)
         * JCIP Annotations under Apache License (com.github.stephenc.jcip:jcip-annotations:1.0-1 - http://stephenc.github.com/jcip-annotations)
+        * JCommander (com.beust:jcommander:1.27 - http://beust.com/jcommander)
         * JCommander (com.beust:jcommander:1.30 - http://beust.com/jcommander)
         * jdependency (org.vafer:jdependency:1.2 - http://github.com/tcurdt/jdependency)
         * JDO API (javax.jdo:jdo-api:3.0.1 - http://db.apache.org/jdo)
@@ -418,11 +353,7 @@
         * JMS 1.1 (org.apache.geronimo.specs:geronimo-jms_1.1_spec:1.1.1 - http://geronimo.apache.org/specs/geronimo-jms_1.1_spec)
         * jnr-constants (com.github.jnr:jnr-constants:0.9.0 - http://github.com/jnr/jnr-constants)
         * jnr-ffi (com.github.jnr:jnr-ffi:2.0.7 - http://github.com/jnr/jnr-ffi)
-        * Joda time (joda-time:joda-time:2.2 - http://joda-time.sourceforge.net)
         * Joda-Time (joda-time:joda-time:2.3 - http://www.joda.org/joda-time/)
-        * Joda-Time (joda-time:joda-time:2.8.1 - http://www.joda.org/joda-time/)
-        * Joda-Time (joda-time:joda-time:2.9.5 - http://www.joda.org/joda-time/)
-        * Joda-Time (joda-time:joda-time:2.9.9 - http://www.joda.org/joda-time/)
         * jOOL (org.jooq:jool:0.9.12 - http://nexus.sonatype.org/oss-repository-hosting.html/jool)
         * JPam (net.sf.jpam:jpam:1.1 - http://jpam.sf.net)
         * JSON.simple (com.googlecode.json-simple:json-simple:1.1 - http://code.google.com/p/json-simple/)
@@ -479,11 +410,7 @@
         * Maven Settings (org.apache.maven:maven-settings:3.6.0 - https://maven.apache.org/ref/3.6.0/maven-settings/)
         * Maven Settings Builder (org.apache.maven:maven-settings-builder:3.0 - http://maven.apache.org/maven-settings-builder/)
         * Maven Settings Builder (org.apache.maven:maven-settings-builder:3.6.0 - https://maven.apache.org/ref/3.6.0/maven-settings-builder/)
-        * Metrics Core (io.dropwizard.metrics:metrics-core:3.1.0 - http://metrics.codahale.com/metrics-core/)
-        * Metrics Core (io.dropwizard.metrics:metrics-core:3.1.2 - http://metrics.codahale.com/metrics-core/)
-        * Metrics Core (io.dropwizard.metrics:metrics-core:3.2.1 - http://metrics.codahale.com/metrics-core/)
         * Metrics Core (io.dropwizard.metrics:metrics-core:3.2.6 - http://metrics.dropwizard.io/metrics-core/)
-        * Metrics Core Library (com.yammer.metrics:metrics-core:2.2.0 - http://metrics.codahale.com/metrics-core/)
         * Metrics Health Checks (io.dropwizard.metrics:metrics-healthchecks:4.0.2 - http://metrics.dropwizard.io/metrics-healthchecks)
         * Metrics Integration for Jersey 2.x (io.dropwizard.metrics:metrics-jersey2:4.0.2 - http://metrics.dropwizard.io/metrics-jersey2)
         * Metrics Integration for Jetty 9.3 and higher (io.dropwizard.metrics:metrics-jetty9:4.0.2 - http://metrics.dropwizard.io/metrics-jetty9)
@@ -493,20 +420,18 @@
         * MongoDB Java Driver (org.mongodb:mongo-java-driver:3.2.0 - http://www.mongodb.org)
         * mqtt-client (org.fusesource.mqtt-client:mqtt-client:1.10 - http://mqtt-client.fusesource.org/mqtt-client/)
         * mqtt-client (org.fusesource.mqtt-client:mqtt-client:1.14 - http://mqtt-client.fusesource.org/mqtt-client/)
-        * multilang-javascript (org.apache.storm:multilang-javascript:2.0.1-SNAPSHOT - https://storm.apache.org/storm-multilang/multilang-javascript)
-        * multilang-python (org.apache.storm:multilang-python:2.0.1-SNAPSHOT - https://storm.apache.org/storm-multilang/multilang-python)
-        * multilang-ruby (org.apache.storm:multilang-ruby:2.0.1-SNAPSHOT - https://storm.apache.org/storm-multilang/multilang-ruby)
-        * Netty/All-in-One (io.netty:netty-all:4.0.23.Final - http://netty.io/netty-all/)
-        * Netty/All-in-One (io.netty:netty-all:4.0.42.Final - http://netty.io/netty-all/)
-        * Netty/Buffer (io.netty:netty-buffer:4.0.37.Final - http://netty.io/netty-buffer/)
-        * Netty/Codec (io.netty:netty-codec:4.0.37.Final - http://netty.io/netty-codec/)
-        * Netty/Common (io.netty:netty-common:4.0.37.Final - http://netty.io/netty-common/)
-        * Netty/Handler (io.netty:netty-handler:4.0.37.Final - http://netty.io/netty-handler/)
-        * Netty/Transport (io.netty:netty-transport:4.0.37.Final - http://netty.io/netty-transport/)
+        * Netty/All-in-One (io.netty:netty-all:4.1.30.Final - http://netty.io/netty-all/)
+        * Netty/Buffer (io.netty:netty-buffer:4.1.30.Final - http://netty.io/netty-buffer/)
+        * Netty/Codec (io.netty:netty-codec:4.1.30.Final - http://netty.io/netty-codec/)
+        * Netty/Common (io.netty:netty-common:4.1.30.Final - http://netty.io/netty-common/)
+        * Netty/Handler (io.netty:netty-handler:4.1.30.Final - http://netty.io/netty-handler/)
+        * Netty/Resolver (io.netty:netty-resolver:4.1.30.Final - http://netty.io/netty-resolver/)
+        * Netty/Transport (io.netty:netty-transport:4.1.30.Final - http://netty.io/netty-transport/)
         * Netty (io.netty:netty:3.10.6.Final - http://netty.io/)
         * Nimbus JOSE+JWT (com.nimbusds:nimbus-jose-jwt:4.41.1 - https://bitbucket.org/connect2id/nimbus-jose-jwt)
         * Noggit (org.noggit:noggit:0.6 - http://github.com/yonik/noggit)
         * Objenesis (org.objenesis:objenesis:2.1 - http://objenesis.org)
+        * Objenesis (org.objenesis:objenesis:2.6 - http://objenesis.org)
         * OkHttp (com.squareup.okhttp:okhttp:2.4.0 - https://github.com/square/okhttp/okhttp)
         * Okio (com.squareup.okio:okio:1.4.0 - https://github.com/square/okio/okio)
         * opencsv (net.sf.opencsv:opencsv:2.3 - http://opencsv.sf.net)
@@ -528,15 +453,12 @@
         * rocketmq-client 4.2.0 (org.apache.rocketmq:rocketmq-client:4.2.0 - http://rocketmq.apache.org/rocketmq-client/)
         * rocketmq-common 4.2.0 (org.apache.rocketmq:rocketmq-common:4.2.0 - http://rocketmq.apache.org/rocketmq-common/)
         * rocketmq-remoting 4.2.0 (org.apache.rocketmq:rocketmq-remoting:4.2.0 - http://rocketmq.apache.org/rocketmq-remoting/)
-        * Shaded Deps for Storm Client (org.apache.storm:storm-shaded-deps:2.0.1-SNAPSHOT - https://storm.apache.org/storm-shaded-deps)
         * sigar (org.fusesource:sigar:1.6.4 - http://fusesource.com/sigar/)
         * Sisu - Guice (org.sonatype.sisu:sisu-guice:2.1.7 - http://forge.sonatype.com/sisu-guice/)
         * Sisu - Inject (JSR330 bean support) (org.sonatype.sisu:sisu-inject-bean:1.4.2 - http://sisu.sonatype.org/sisu-inject/guice-bean/sisu-inject-bean/)
         * Sisu - Inject (Plexus bean support) (org.sonatype.sisu:sisu-inject-plexus:1.4.2 - http://sisu.sonatype.org/sisu-inject/guice-bean/guice-plexus/sisu-inject-plexus/)
         * Slider Core (org.apache.slider:slider-core:0.90.2-incubating - http://slider.incubator.apache.org/slider-core/)
         * SnakeYAML (org.yaml:snakeyaml:1.11 - http://www.snakeyaml.org)
-        * SnakeYAML (org.yaml:snakeyaml:1.15 - http://www.snakeyaml.org)
-        * SnakeYAML (org.yaml:snakeyaml:1.18 - http://www.snakeyaml.org)
         * Snappy for Java (org.xerial.snappy:snappy-java:1.0.4.1 - http://code.google.com/p/snappy-java/)
         * Snappy for Java (org.xerial.snappy:snappy-java:1.0.5 - http://github.com/xerial/snappy-java/)
         * snappy-java (org.xerial.snappy:snappy-java:1.1.2.6 - https://github.com/xerial/snappy-java)
@@ -551,102 +473,65 @@
         * Spring Messaging (org.springframework:spring-messaging:5.1.5.RELEASE - https://github.com/spring-projects/spring-framework)
         * Spring Transaction (org.springframework:spring-tx:5.1.5.RELEASE - https://github.com/spring-projects/spring-framework)
         * StAX API (stax:stax-api:1.0.1 - http://stax.codehaus.org/)
-        * storm-autocreds (org.apache.storm:storm-autocreds:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-autocreds)
-        * Storm Client (org.apache.storm:storm-client:2.0.1-SNAPSHOT - https://storm.apache.org/storm-client)
-        * storm-clojure (org.apache.storm:storm-clojure:2.0.1-SNAPSHOT - https://storm.apache.org/storm-clojure)
-        * Storm Core (org.apache.storm:storm-core:2.0.1-SNAPSHOT - https://storm.apache.org/storm-core)
-        * storm-elasticsearch (org.apache.storm:storm-elasticsearch:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-elasticsearch)
-        * storm-hbase (org.apache.storm:storm-hbase:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-hbase)
-        * storm-hdfs (org.apache.storm:storm-hdfs:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-hdfs)
-        * storm-hdfs-blobstore (org.apache.storm:storm-hdfs-blobstore:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-hdfs-blobstore)
-        * storm-hive (org.apache.storm:storm-hive:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-hive)
-        * storm-jdbc (org.apache.storm:storm-jdbc:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-jdbc)
-        * storm-jms (org.apache.storm:storm-jms:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-jms)
-        * storm-kafka-client (org.apache.storm:storm-kafka-client:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-kafka-client)
-        * storm-metrics (org.apache.storm:storm-metrics:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-metrics)
-        * storm-mongodb (org.apache.storm:storm-mongodb:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-mongodb)
-        * storm-mqtt (org.apache.storm:storm-mqtt:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-mqtt)
-        * storm-opentsdb (org.apache.storm:storm-opentsdb:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-opentsdb)
-        * storm-pmml (org.apache.storm:storm-pmml:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-pmml)
-        * storm-redis (org.apache.storm:storm-redis:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-redis)
-        * storm-rocketmq (org.apache.storm:storm-rocketmq:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-rocketmq)
-        * storm-server (org.apache.storm:storm-server:2.0.1-SNAPSHOT - https://storm.apache.org/storm-server)
-        * storm-solr (org.apache.storm:storm-solr:2.0.1-SNAPSHOT - https://storm.apache.org/external/storm-solr)
-        * storm-sql-runtime (org.apache.storm:storm-sql-runtime:2.0.1-SNAPSHOT - https://storm.apache.org/storm-sql-runtime)
         * T-Digest (com.tdunning:t-digest:3.0 - https://github.com/tdunning/t-digest)
         * Tephra API (co.cask.tephra:tephra-api:0.6.0 - https://github.com/caskdata/tephra/tephra-api)
         * Tephra Core (co.cask.tephra:tephra-core:0.6.0 - https://github.com/caskdata/tephra/tephra-core)
         * Tephra HBase 1.0 Compatibility (co.cask.tephra:tephra-hbase-compat-1.0:0.6.0 - https://github.com/caskdata/tephra/tephra-hbase-compat-1.0)
         * The Netty Project (io.netty:netty:3.6.2.Final - http://netty.io/)
-        * The Netty Project (io.netty:netty:3.7.0.Final - http://netty.io/)
         * Woodstox (org.codehaus.woodstox:woodstox-core-asl:4.4.1 - http://woodstox.codehaus.org)
         * Xerces2 Java Parser (xerces:xercesImpl:2.9.1 - http://xerces.apache.org/xerces2-j)
         * XML Commons External Components XML APIs (xml-apis:xml-apis:1.3.04 - http://xml.apache.org/commons/components/external/)
         * zookeeper (org.apache.zookeeper:zookeeper:3.4.14 - no url defined)
         * zookeeper (org.apache.zookeeper:zookeeper:3.4.6 - no url defined)
 
-    Apache License, Version 2.0, Common Development and Distribution License (CDDL) v1.1, Modified BSD, Public Domain, The GNU General Public License (GPL), Version 2, With Classpath Exception
+    Apache License, Version 2.0, BSD 2-Clause, Eclipse Distribution License, Version 1.0, Eclipse Public License, Version 2.0, jQuery license, MIT License, Modified BSD, Public Domain, The GNU General Public License (GPL), Version 2, With Classpath Exception, W3C license
 
-        * jersey-connectors-apache (org.glassfish.jersey.connectors:jersey-apache-connector:2.27 - https://jersey.github.io/project/jersey-apache-connector/)
-        * jersey-container-grizzly2-http (org.glassfish.jersey.containers:jersey-container-grizzly2-http:2.27 - https://jersey.github.io/project/jersey-container-grizzly2-http/)
-        * jersey-container-grizzly2-servlet (org.glassfish.jersey.containers:jersey-container-grizzly2-servlet:2.27 - https://jersey.github.io/project/jersey-container-grizzly2-servlet/)
-        * jersey-container-servlet (org.glassfish.jersey.containers:jersey-container-servlet:2.27 - https://jersey.github.io/project/jersey-container-servlet/)
-        * jersey-container-servlet-core (org.glassfish.jersey.containers:jersey-container-servlet-core:2.27 - https://jersey.github.io/project/jersey-container-servlet-core/)
-        * jersey-core-client (org.glassfish.jersey.core:jersey-client:2.27 - https://jersey.github.io/jersey-client/)
-        * jersey-ext-entity-filtering (org.glassfish.jersey.ext:jersey-entity-filtering:2.27 - https://jersey.github.io/project/jersey-entity-filtering/)
-        * jersey-inject-hk2 (org.glassfish.jersey.inject:jersey-hk2:2.27 - https://jersey.github.io/project/jersey-hk2/)
-        * jersey-media-jaxb (org.glassfish.jersey.media:jersey-media-jaxb:2.27 - https://jersey.github.io/project/jersey-media-jaxb/)
+        * jersey-connectors-apache (org.glassfish.jersey.connectors:jersey-apache-connector:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-apache-connector)
+        * jersey-container-grizzly2-http (org.glassfish.jersey.containers:jersey-container-grizzly2-http:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-grizzly2-http)
+        * jersey-container-grizzly2-servlet (org.glassfish.jersey.containers:jersey-container-grizzly2-servlet:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-grizzly2-servlet)
+        * jersey-container-servlet (org.glassfish.jersey.containers:jersey-container-servlet:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-servlet)
+        * jersey-container-servlet-core (org.glassfish.jersey.containers:jersey-container-servlet-core:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-servlet-core)
+        * jersey-core-client (org.glassfish.jersey.core:jersey-client:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/jersey-client)
+        * jersey-ext-bean-validation (org.glassfish.jersey.ext:jersey-bean-validation:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-bean-validation)
+        * jersey-ext-entity-filtering (org.glassfish.jersey.ext:jersey-entity-filtering:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-entity-filtering)
+        * jersey-ext-metainf-services (org.glassfish.jersey.ext:jersey-metainf-services:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-metainf-services)
+        * jersey-inject-hk2 (org.glassfish.jersey.inject:jersey-hk2:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-hk2)
+        * jersey-media-jaxb (org.glassfish.jersey.media:jersey-media-jaxb:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-media-jaxb)
 
-    Apache License, Version 2.0, Common Development and Distribution License (CDDL) v1.1, Modified BSD, The GNU General Public License (GPL), Version 2, With Classpath Exception
+    Apache License, Version 2.0, Eclipse Public License, Version 2.0, Modified BSD, The GNU General Public License (GPL), Version 2, With Classpath Exception
 
-        * jersey-core-server (org.glassfish.jersey.core:jersey-server:2.27 - https://jersey.github.io/jersey-server/)
+        * jersey-core-server (org.glassfish.jersey.core:jersey-server:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/jersey-server)
 
-    Apache License, Version 2.0, Common Development and Distribution License (CDDL) v1.1, Public Domain, The GNU General Public License (GPL), Version 2, With Classpath Exception
+    Apache License, Version 2.0, Eclipse Public License, Version 2.0, Public Domain, The GNU General Public License (GPL), Version 2, With Classpath Exception
 
-        * jersey-core-common (org.glassfish.jersey.core:jersey-common:2.27 - https://jersey.github.io/jersey-common/)
+        * jersey-core-common (org.glassfish.jersey.core:jersey-common:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/jersey-common)
 
-    Apache License, Version 2.0, Common Development and Distribution License (CDDL) v1.1, The GNU General Public License (GPL), Version 2, With Classpath Exception
+    Apache License, Version 2.0, Eclipse Public License, Version 2.0, The GNU General Public License (GPL), Version 2, With Classpath Exception
 
-        * jersey-media-json-jackson (org.glassfish.jersey.media:jersey-media-json-jackson:2.27 - https://jersey.github.io/project/jersey-media-json-jackson/)
+        * jersey-media-json-jackson (org.glassfish.jersey.media:jersey-media-json-jackson:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-media-json-jackson)
 
     Apache License, Version 2.0, Eclipse Public License - Version 1.0
 
         * Jetty :: Aggregate :: All core Jetty (org.eclipse.jetty.aggregate:jetty-all:7.6.0.v20120127 - http://www.eclipse.org/jetty/jetty-aggregate-project/jetty-all)
-        * Jetty :: Continuation (org.eclipse.jetty:jetty-continuation:9.4.11.v20180605 - http://www.eclipse.org/jetty)
         * Jetty :: Continuation (org.eclipse.jetty:jetty-continuation:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: Http Utility (org.eclipse.jetty:jetty-http:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Http Utility (org.eclipse.jetty:jetty-http:9.4.11.v20180605 - http://www.eclipse.org/jetty)
         * Jetty :: Http Utility (org.eclipse.jetty:jetty-http:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: IO Utility (org.eclipse.jetty:jetty-io:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: IO Utility (org.eclipse.jetty:jetty-io:9.4.11.v20180605 - http://www.eclipse.org/jetty)
         * Jetty :: IO Utility (org.eclipse.jetty:jetty-io:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: Security (org.eclipse.jetty:jetty-security:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Security (org.eclipse.jetty:jetty-security:9.4.11.v20180605 - http://www.eclipse.org/jetty)
         * Jetty :: Security (org.eclipse.jetty:jetty-security:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: Server Core (org.eclipse.jetty:jetty-server:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Server Core (org.eclipse.jetty:jetty-server:9.4.11.v20180605 - http://www.eclipse.org/jetty)
         * Jetty :: Server Core (org.eclipse.jetty:jetty-server:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: Servlet Handling (org.eclipse.jetty:jetty-servlet:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Servlet Handling (org.eclipse.jetty:jetty-servlet:9.4.11.v20180605 - http://www.eclipse.org/jetty)
         * Jetty :: Servlet Handling (org.eclipse.jetty:jetty-servlet:9.4.14.v20181114 - http://www.eclipse.org/jetty)
         * Jetty :: SetUID Java (org.eclipse.jetty.toolchain.setuid:jetty-setuid-java:1.0.3 - http://www.eclipse.org/jetty/jetty-toolchain/jetty-setuid-parent/jetty-setuid-java)
-        * Jetty :: Utilities :: Ajax(JSON) (org.eclipse.jetty:jetty-util-ajax:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Utilities (org.eclipse.jetty:jetty-util:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Utilities (org.eclipse.jetty:jetty-util:9.4.11.v20180605 - http://www.eclipse.org/jetty)
+        * Jetty :: Utilities :: Ajax(JSON) (org.eclipse.jetty:jetty-util-ajax:9.4.14.v20181114 - http://www.eclipse.org/jetty)
         * Jetty :: Utilities (org.eclipse.jetty:jetty-util:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: Utility Servlets and Filters (org.eclipse.jetty:jetty-servlets:9.4.11.v20180605 - http://www.eclipse.org/jetty)
         * Jetty :: Utility Servlets and Filters (org.eclipse.jetty:jetty-servlets:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: Webapp Application Support (org.eclipse.jetty:jetty-webapp:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Webapp Application Support (org.eclipse.jetty:jetty-webapp:9.4.11.v20180605 - http://www.eclipse.org/jetty)
-        * Jetty :: XML utilities (org.eclipse.jetty:jetty-xml:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: XML utilities (org.eclipse.jetty:jetty-xml:9.4.11.v20180605 - http://www.eclipse.org/jetty)
+        * Jetty :: Webapp Application Support (org.eclipse.jetty:jetty-webapp:9.4.14.v20181114 - http://www.eclipse.org/jetty)
+        * Jetty :: XML utilities (org.eclipse.jetty:jetty-xml:9.4.14.v20181114 - http://www.eclipse.org/jetty)
         * Jetty Orbit :: Servlet API (org.eclipse.jetty.orbit:javax.servlet:3.0.0.v201112011016 - http://www.eclipse.org/jetty/jetty-orbit/javax.servlet)
         * Jetty Server (org.mortbay.jetty:jetty:6.1.26 - http://www.eclipse.org/jetty/jetty-parent/project/modules/jetty)
         * Jetty Utilities (org.mortbay.jetty:jetty-util:6.1.26 - http://www.eclipse.org/jetty/jetty-parent/project/jetty-util)
 
     Apache License, Version 2.0, GNU General Public License, version 2
 
-        * RocksDB JNI (org.rocksdb:rocksdbjni:5.8.6 - http://rocksdb.org/)
+        * RocksDB JNI (org.rocksdb:rocksdbjni:5.18.3 - http://rocksdb.org/)
 
     Apache License, Version 2.0, GNU Lesser General Public License (LGPL), Version 2.1
 
@@ -657,7 +542,6 @@
 
     Apache License, Version 2.0, LGPL 2.1, MPL 1.1
 
-        * Javassist (org.javassist:javassist:3.20.0-GA - http://www.javassist.org/)
         * Javassist (org.javassist:javassist:3.22.0-GA - http://www.javassist.org/)
 
     BSD-2-Clause, Public Domain, per Creative Commons CC0
@@ -703,7 +587,6 @@
         * ASM Util (org.ow2.asm:asm-util:6.0_BETA - http://asm.objectweb.org/asm-util/)
         * Javolution (javolution:javolution:5.5.1 - http://javolution.org)
         * JLine (jline:jline:0.9.94 - http://jline.sourceforge.net)
-        * JLine (jline:jline:2.12 - http://nexus.sonatype.org/oss-repository-hosting.html/jline)
         * ParaNamer Core (com.thoughtworks.paranamer:paranamer:2.3 - http://paranamer.codehaus.org/paranamer)
         * Stax2 API (org.codehaus.woodstox:stax2-api:3.1.4 - http://wiki.fasterxml.com/WoodstoxStax2)
         * xmlenc Library (xmlenc:xmlenc:0.52 - http://xmlenc.sourceforge.net)
@@ -714,54 +597,23 @@
 
     Common Development and Distribution License
 
-        * aopalliance version 1.0 repackaged as a module (org.glassfish.hk2.external:aopalliance-repackaged:2.5.0-b32 - https://hk2.java.net/external/aopalliance-repackaged)
-        * aopalliance version 1.0 repackaged as a module (org.glassfish.hk2.external:aopalliance-repackaged:2.5.0-b42 - https://github.com/hk2-project/hk2/external/aopalliance-repackaged)
         * Expression Language 3.0 (org.glassfish:javax.el:3.0.0 - http://el-spec.java.net)
         * Expression Language 3.0 (org.glassfish:javax.el:3.0.1-b11 - http://uel.java.net)
-        * grizzly-framework (org.glassfish.grizzly:grizzly-framework:2.4.3 - http://grizzly.java.net/grizzly-framework)
-        * grizzly-http (org.glassfish.grizzly:grizzly-http:2.4.3 - http://grizzly.java.net/grizzly-http)
-        * grizzly-http-server (org.glassfish.grizzly:grizzly-http-server:2.4.3 - http://grizzly.java.net/grizzly-http-server)
-        * grizzly-http-servlet (org.glassfish.grizzly:grizzly-http-servlet:2.2.16 - http://grizzly.java.net/grizzly-http-servlet)
-        * HK2 API module (org.glassfish.hk2:hk2-api:2.5.0-b32 - https://hk2.java.net/hk2-api)
-        * HK2 API module (org.glassfish.hk2:hk2-api:2.5.0-b42 - https://github.com/hk2-project/hk2/hk2-api)
-        * HK2 Implementation Utilities (org.glassfish.hk2:hk2-utils:2.5.0-b32 - https://hk2.java.net/hk2-utils)
-        * HK2 Implementation Utilities (org.glassfish.hk2:hk2-utils:2.5.0-b42 - https://github.com/hk2-project/hk2/hk2-utils)
         * JavaServer Pages(TM) API (javax.servlet.jsp:javax.servlet.jsp-api:2.3.1 - http://jsp.java.net)
         * Java Servlet API (javax.servlet:javax.servlet-api:3.1.0 - http://servlet-spec.java.net)
-        * Java Servlet API (javax.servlet:javax.servlet-api:4.0.0 - https://javaee.github.io/servlet-spec/)
-        * javax.annotation API (javax.annotation:javax.annotation-api:1.2 - http://jcp.org/en/jsr/detail?id=250)
         * javax.annotation API (javax.annotation:javax.annotation-api:1.3.2 - http://jcp.org/en/jsr/detail?id=250)
-        * javax.inject:1 as OSGi bundle (org.glassfish.hk2.external:javax.inject:2.5.0-b32 - https://hk2.java.net/external/javax.inject)
-        * javax.inject:1 as OSGi bundle (org.glassfish.hk2.external:javax.inject:2.5.0-b42 - https://github.com/hk2-project/hk2/external/javax.inject)
-        * jersey-container-servlet-core (org.glassfish.jersey.containers:jersey-container-servlet-core:2.25.1 - https://jersey.java.net/project/jersey-container-servlet-core/)
-        * jersey-core-client (org.glassfish.jersey.core:jersey-client:2.25.1 - https://jersey.java.net/jersey-client/)
-        * jersey-core-common (org.glassfish.jersey.core:jersey-common:2.25.1 - https://jersey.java.net/jersey-common/)
-        * jersey-core-server (org.glassfish.jersey.core:jersey-server:2.25.1 - https://jersey.java.net/jersey-server/)
-        * jersey-media-jaxb (org.glassfish.jersey.media:jersey-media-jaxb:2.25.1 - https://jersey.java.net/project/jersey-media-jaxb/)
-        * jersey-repackaged-guava (org.glassfish.jersey.bundles.repackaged:jersey-guava:2.25.1 - https://jersey.java.net/project/project/jersey-guava/)
         * JSP implementation (org.glassfish.web:javax.servlet.jsp:2.3.2 - http://jsp.java.net)
-        * OSGi resource locator bundle - used by various API providers that rely on META-INF/services mechanism to locate providers. (org.glassfish.hk2:osgi-resource-locator:1.0.1 - http://glassfish.org/osgi-resource-locator/)
-        * ServiceLocator Default Implementation (org.glassfish.hk2:hk2-locator:2.5.0-b32 - https://hk2.java.net/hk2-locator)
-        * ServiceLocator Default Implementation (org.glassfish.hk2:hk2-locator:2.5.0-b42 - https://github.com/hk2-project/hk2/hk2-locator)
 
     Common Development and Distribution License (CDDL) v1.0
 
-        * Glassfish Jasper (org.mortbay.jetty:jsp-2.1:6.1.14 - http://jetty.mortbay.org/project/modules/jsp-2.1)
         * JavaBeans(TM) Activation Framework (javax.activation:activation:1.1.1 - http://java.sun.com/javase/technologies/desktop/javabeans/jaf/index.jsp)
-        * JavaBeans Activation Framework (JAF) (javax.activation:activation:1.1 - http://java.sun.com/products/javabeans/jaf/index.jsp)
         * JavaMail API (javax.mail:mail:1.4.1 - https://glassfish.dev.java.net/javaee5/mail/)
         * JSR-250 Common Annotations for the JavaTM Platform (javax.annotation:jsr250-api:1.0 - http://jcp.org/aboutJava/communityprocess/final/jsr250/index.html)
-        * Servlet Specification 2.5 API (org.mortbay.jetty:servlet-api-2.5:6.1.14 - http://jetty.mortbay.org/project/modules/servlet-api-2.5)
-
-    Common Development and Distribution License (CDDL) v1.0, GNU General Public Library
-
-        * Streaming API for XML (javax.xml.stream:stax-api:1.0-2 - no url defined)
 
     Common Development and Distribution License (CDDL) v1.1
 
         * Java Message Service (javax.jms:jms:1.1 - http://java.sun.com/products/jms)
         * Java Transaction API (javax.transaction:jta:1.1 - http://java.sun.com/products/jta)
-        * jersey-bom (org.glassfish.jersey:jersey-bom:2.27 - http://java.net/jersey-bom/)
         * jsp-api (javax.servlet:jsp-api:2.0 - no url defined)
         * jsp-api (javax.servlet.jsp:jsp-api:2.1 - no url defined)
         * servlet-api (javax.servlet:servlet-api:2.5 - no url defined)
@@ -770,14 +622,11 @@
     Common Development and Distribution License (CDDL) v1.1, The GNU General Public License (GPL), Version 2, With Classpath Exception
 
         * javax.ws.rs-api (javax.ws.rs:javax.ws.rs-api:2.0.1 - http://jax-rs-spec.java.net)
-        * javax.ws.rs-api (javax.ws.rs:javax.ws.rs-api:2.1 - http://jax-rs-spec.java.net)
         * jaxb-api (javax.xml.bind:jaxb-api:2.3.0 - https://github.com/javaee/jaxb-spec/jaxb-api)
-        * JAXB API bundle for GlassFish V3 (javax.xml.bind:jaxb-api:2.2.2 - https://jaxb.dev.java.net/)
         * JAXB Reference Implementation (com.sun.xml.bind:jaxb-impl:2.2.6 - http://jaxb.java.net/)
         * JAXB RI (com.sun.xml.bind:jaxb-impl:2.2.3-1 - http://jaxb.java.net/)
         * jersey-client (com.sun.jersey:jersey-client:1.9 - https://jersey.java.net/jersey-client/)
         * jersey-core (com.sun.jersey:jersey-core:1.9 - https://jersey.java.net/jersey-core/)
-        * jersey-grizzly2 (com.sun.jersey:jersey-grizzly2:1.19.4 - https://jersey.java.net/jersey-grizzly2/)
         * jersey-guice (com.sun.jersey.contribs:jersey-guice:1.9 - https://jersey.java.net/jersey-contribs/jersey-guice/)
         * jersey-json (com.sun.jersey:jersey-json:1.9 - https://jersey.java.net/jersey-json/)
         * jersey-server (com.sun.jersey:jersey-server:1.9 - https://jersey.java.net/jersey-server/)
@@ -786,27 +635,49 @@
 
         * jnr-posix (com.github.jnr:jnr-posix:3.0.27 - http://nexus.sonatype.org/oss-repository-hosting.html/jnr-posix)
 
+    Eclipse Distribution License, Version 1.0
+
+        * jakarta.xml.bind-api (jakarta.xml.bind:jakarta.xml.bind-api:2.3.2 - https://github.com/eclipse-ee4j/jaxb-api/jakarta.xml.bind-api)
+        * JavaBeans Activation Framework (com.sun.activation:jakarta.activation:1.2.1 - https://github.com/eclipse-ee4j/jaf/jakarta.activation)
+        * JavaBeans Activation Framework API jar (jakarta.activation:jakarta.activation-api:1.2.1 - https://github.com/eclipse-ee4j/jaf/jakarta.activation-api)
+
     Eclipse Public License, Version 1.0
 
         * Aether Utilities (org.eclipse.aether:aether-util:0.9.0.M2 - http://www.eclipse.org/aether/aether-util/)
-        * clojure (org.clojure:clojure:1.3.0-beta1 - http://clojure.org/)
-        * JUnit (junit:junit:4.12 - http://junit.org)
+        * clojure (org.clojure:clojure:1.10.0 - http://clojure.org/)
+        * core.specs.alpha (org.clojure:core.specs.alpha:0.2.44 - https://github.com/clojure/build.poms/core.specs.alpha)
         * org.eclipse.sisu.inject (org.eclipse.sisu:org.eclipse.sisu.inject:0.3.3 - http://www.eclipse.org/sisu/org.eclipse.sisu.inject/)
         * org.eclipse.sisu.plexus (org.eclipse.sisu:org.eclipse.sisu.plexus:0.3.3 - http://www.eclipse.org/sisu/org.eclipse.sisu.plexus/)
+        * spec.alpha (org.clojure:spec.alpha:0.2.176 - https://github.com/clojure/build.poms/spec.alpha)
         * tools.logging (org.clojure:tools.logging:0.2.3 - http://nexus.sonatype.org/oss-repository-hosting.html/pom.contrib/tools.logging)
 
-    Eclipse Public License, Version 1.0, GNU Lesser General Public License
+    Eclipse Public License, Version 2.0
 
-        * Logback Classic Module (ch.qos.logback:logback-classic:1.2.3 - http://logback.qos.ch/logback-classic)
+        * grizzly-framework (org.glassfish.grizzly:grizzly-framework:2.4.4 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-framework)
+        * grizzly-http (org.glassfish.grizzly:grizzly-http:2.4.4 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-http)
+        * grizzly-http-server (org.glassfish.grizzly:grizzly-http-server:2.4.4 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-http-server)
+        * grizzly-http-servlet (org.glassfish.grizzly:grizzly-http-servlet:2.4.4 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-http-servlet)
 
-    GNU General Public License, version 2 (GPL2), with the classpath exception, MIT License
+    Eclipse Public License, Version 2.0, The GNU General Public License (GPL), Version 2, With Classpath Exception
 
-        * Checker Qual (org.checkerframework:checker-compat-qual:2.0.0 - http://checkerframework.org)
-        * Checker Qual (org.checkerframework:checker-qual:2.0.0 - http://checkerframework.org)
+        * aopalliance version 1.0 repackaged as a module (org.glassfish.hk2.external:aopalliance-repackaged:2.5.0 - https://github.com/eclipse-ee4j/glassfish-hk2/external/aopalliance-repackaged)
+        * Expression Language 3.0 (org.glassfish:jakarta.el:3.0.2 - https://projects.eclipse.org/projects/ee4j.el)
+        * Expression Language 3.0 API (jakarta.el:jakarta.el-api:3.0.2 - https://projects.eclipse.org/projects/ee4j.el)
+        * HK2 API module (org.glassfish.hk2:hk2-api:2.5.0 - https://github.com/eclipse-ee4j/glassfish-hk2/hk2-api)
+        * HK2 Implementation Utilities (org.glassfish.hk2:hk2-utils:2.5.0 - https://github.com/eclipse-ee4j/glassfish-hk2/hk2-utils)
+        * jakarta.annotation API (jakarta.annotation:jakarta.annotation-api:1.3.4 - https://projects.eclipse.org/projects/ee4j.ca)
+        * Java Servlet API (jakarta.servlet:jakarta.servlet-api:4.0.2 - https://projects.eclipse.org/projects/ee4j.servlet)
+        * javax.inject:1 as OSGi bundle (org.glassfish.hk2.external:jakarta.inject:2.5.0 - https://github.com/eclipse-ee4j/glassfish-hk2/external/jakarta.inject)
+        * javax.ws.rs-api (jakarta.ws.rs:jakarta.ws.rs-api:2.1.5 - https://github.com/eclipse-ee4j/jaxrs-api)
+        * OSGi resource locator (org.glassfish.hk2:osgi-resource-locator:1.0.3 - https://projects.eclipse.org/projects/ee4j/osgi-resource-locator)
+        * ServiceLocator Default Implementation (org.glassfish.hk2:hk2-locator:2.5.0 - https://github.com/eclipse-ee4j/glassfish-hk2/hk2-locator)
+
+    Eclipse Public License (EPL) 1.0, GNU Lesser General Public License Version 2.1, February 1999
+
+        * JGraphT - Core (org.jgrapht:jgrapht-core:0.9.0 - http://www.jgrapht.org/jgrapht-core)
 
     MIT License
 
-        * Animal Sniffer Annotations (org.codehaus.mojo:animal-sniffer-annotations:1.14 - http://mojo.codehaus.org/animal-sniffer/animal-sniffer-annotations)
         * Animal Sniffer Annotations (org.codehaus.mojo:animal-sniffer-annotations:1.17 - http://www.mojohaus.org/animal-sniffer/animal-sniffer-annotations)
         * argparse4j (net.sourceforge.argparse4j:argparse4j:0.8.1 - http://argparse4j.github.io)
         * azure-eventhubs (com.microsoft.azure:azure-eventhubs:0.13.1 - https://github.com/Azure/azure-event-hubs/)
@@ -814,25 +685,14 @@
         * JCL 1.1.1 implemented over SLF4J (org.slf4j:jcl-over-slf4j:1.7.7 - http://www.slf4j.org)
         * JCL 1.2 implemented over SLF4J (org.slf4j:jcl-over-slf4j:1.7.25 - http://www.slf4j.org)
         * JCodings (org.jruby.jcodings:jcodings:1.0.18 - http://nexus.sonatype.org/oss-repository-hosting.html/jcodings)
-        * JCodings (org.jruby.jcodings:jcodings:1.0.8 - no url defined)
         * Jedis (redis.clients:jedis:2.9.0 - https://github.com/xetorthio/jedis)
         * jnr-x86asm (com.github.jnr:jnr-x86asm:1.0.2 - http://github.com/jnr/jnr-x86asm)
         * Joni (org.jruby.joni:joni:2.1.11 - http://nexus.sonatype.org/oss-repository-hosting.html/joni)
-        * Joni (org.jruby.joni:joni:2.1.2 - http://nexus.sonatype.org/oss-repository-hosting.html/joni)
         * JUL to SLF4J bridge (org.slf4j:jul-to-slf4j:1.7.25 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.6.6 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.7.10 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.7.12 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.7.13 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.7.21 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.7.22 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.7.25 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.7.5 - http://www.slf4j.org)
+        * SLF4J API Module (org.slf4j:slf4j-api:1.7.26 - http://www.slf4j.org)
         * SLF4J API Module (org.slf4j:slf4j-api:1.7.6 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.7.7 - http://www.slf4j.org)
-        * SLF4J LOG4J-12 Binding (org.slf4j:slf4j-log4j12:1.6.6 - http://www.slf4j.org)
-        * SLF4J LOG4J-12 Binding (org.slf4j:slf4j-log4j12:1.7.10 - http://www.slf4j.org)
-        * SLF4J LOG4J-12 Binding (org.slf4j:slf4j-log4j12:1.7.25 - http://www.slf4j.org)
+        * SLF4J LOG4J-12 Binding (org.slf4j:slf4j-log4j12:1.7.26 - http://www.slf4j.org)
+        * System Out and Err redirected to SLF4J (uk.org.lidalia:sysout-over-slf4j:1.0.2 - http://projects.lidalia.org.uk/sysout-over-slf4j/)
 
     Mozilla Public License Version 1.1
 
@@ -845,11 +705,6 @@
     Public Domain
 
         * AOP alliance (aopalliance:aopalliance:1.0 - http://aopalliance.sourceforge.net)
-        * XZ for Java (org.tukaani:xz:1.0 - http://tukaani.org/xz/java.html)
-
-    Public Domain, per Creative Commons CC0
-
-        * HdrHistogram (org.hdrhistogram:HdrHistogram:2.1.6 - http://hdrhistogram.github.io/HdrHistogram/)
 
     Revised BSD
 
diff --git a/DEVELOPER.md b/DEVELOPER.md
index 35e0696..70f8871 100644
--- a/DEVELOPER.md
+++ b/DEVELOPER.md
@@ -194,11 +194,17 @@
 **Important: A pull request must first be properly approved before you are allowed to merge it.**
 
 Committers that are integrating patches or pull requests should use the official Apache repository at
-[https://git-wip-us.apache.org/repos/asf/storm.git](https://git-wip-us.apache.org/repos/asf/storm.git).
+[https://gitbox.apache.org/repos/asf/storm.git](https://gitbox.apache.org/repos/asf/storm.git).
+
+#### Via GitHub
+
+You can use the [Gitbox account linking utility](https://gitbox.apache.org/setup/) to link your Apache and GitHub accounts. This will allow you to merge pull requests using GitHub's UI.
+
+#### Via your terminal
 
 To pull in a merge request you should generally follow the command line instructions sent out by GitHub.
 
-1. Go to your local copy of the [Apache git repo](https://git-wip-us.apache.org/repos/asf/storm.git), switch
+1. Go to your local copy of the [Apache git repo](https://gitbox.apache.org/repos/asf/storm.git), switch
    to the `master` branch, and make sure it is up to date.
 
         $ git checkout master
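+
+   A minimal sketch of then bringing `master` up to date (assuming the Apache remote is named `origin`; adjust the remote name to your setup):
+
+        $ git pull origin master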
@@ -311,7 +317,7 @@
  
 ## Listing dependency licenses
 
-You can generate a list of dependencies and their licenses by running `mvn generate-resources -Dlicense.skipAggregateAddThirdParty=false` in the project root.
+You can generate a list of dependencies and their licenses by running `mvn license:aggregate-add-third-party@generate-and-check-licenses -Dlicense.skipAggregateAddThirdParty=false` in the project root.
 The list will be put in DEPENDENCY_LICENSES.
 
 The license aggregation plugin will use the license listed in a dependency's POM. If the license is missing, or incomplete (e.g. due to multiple licenses), you can override the license by describing the dependency in the THIRD-PARTY.properties file in the project root.
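+
+As a hedged illustration, an override entry in THIRD-PARTY.properties follows the license-maven-plugin's `groupId--artifactId--version=License Name` convention; the coordinates below are hypothetical:
+
+    com.example--sample-artifact--1.0.0=Apache License, Version 2.0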
@@ -321,11 +327,7 @@
 
 When auditing the binary LICENSE-binary and NOTICE-binary, there are a couple of helper scripts available in dev-tools. `collect_license_files` can create an aggregate NOTICE from the libraries in an extracted distribution. The aggregate NOTICE should be adjusted to remove Storm notices and duplicates, and added to the NOTICE-binary.
 
-`list_jars` can list the jars in an extracted binary distribution. Note that while listing all the jars in the binary distribution is helpful, special attention must be paid to shaded jars, as they may contain shaded dependencies that must be listed in LICENSE-binary separately.
-
-The license plugin can generate a list of dependencies with licenses for the binary distribution with the following command: `mvn generate-resources -Dlicense.skipAggregateAddThirdParty=false` in the storm-dist/binary directory. 
-
-The generated list in target/generated-sources/license/THIRD-PARTY.txt is mostly complete, and a good input to the LICENSE-binary file. The major omission in it is the storm-shaded-deps dependencies, as they are shaded. These dependencies can be manually listed with `mvn dependency:list` in the storm-shaded-deps project, and then manually added. 
+The `dev-tools/validate-license-files.py` script checks that LICENSE-binary and DEPENDENCY_LICENSES are up to date. Regenerating DEPENDENCY_LICENSES only requires rerunning the license plugin (see above); LICENSE-binary must be updated manually. The script verifies that the dependencies included in a storm-dist/binary build are present in LICENSE-binary, and that no other dependencies are listed. Any additional or missing dependencies are printed to the console and can be added to LICENSE-binary manually; there will likely be an entry for them in `DEPENDENCY_LICENSES` that can be copy-pasted into LICENSE-binary.
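+
+A minimal example invocation (assuming Python 3 and the project root as the working directory; the script may accept additional arguments):
+
+    $ python3 dev-tools/validate-license-files.py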
 
 You can download the dependency licenses by running `mvn package -Dlicense.skipAggregateDownloadLicenses=false -DskipTests` in the project root. This will put the licenses in target/generated-resources. Keep an eye on the Maven output, as some dependencies may not have licenses configured correctly. These will have to be downloaded manually.
 
@@ -406,7 +408,7 @@
 repository associated with Storm.
 
 * **Committers only:**
-  [https://git-wip-us.apache.org/repos/asf/storm.git](https://git-wip-us.apache.org/repos/asf/storm.git)
+  [https://gitbox.apache.org/repos/asf/storm.git](https://gitbox.apache.org/repos/asf/storm.git)
   is the official and authoritative git repository for Storm, managed under the umbrella of the Apache Software
   Foundation.  Only official Storm committers will interact with this repository.
   When you push the first time to this repository git will prompt you for your username and password.  Use your Apache
@@ -419,7 +421,7 @@
 
 An automated bot (called _[ASF GitHub Bot](https://issues.apache.org/jira/secure/ViewProfile.jspa?name=githubbot)_ in
 [Storm JIRA](https://issues.apache.org/jira/browse/STORM)) runs periodically to merge changes in the
-[official Apache repo](https://git-wip-us.apache.org/repos/asf/storm.git) to the read-only
+[official Apache repo](https://gitbox.apache.org/repos/asf/storm.git) to the read-only
 [GitHub mirror repository](https://github.com/apache/storm/), and to merge comments in GitHub pull requests to
 the [Storm JIRA](https://issues.apache.org/jira/browse/STORM).
 
diff --git a/LICENSE-binary b/LICENSE-binary
index 023f642..9dfcd5f 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -649,9 +649,25 @@
     Apache License, Version 2.0
 
         * HttpClient (commons-httpclient:commons-httpclient:3.0.1 - http://jakarta.apache.org/commons/httpclient/)
-        * Log4j Implemented Over SLF4J (org.slf4j:log4j-over-slf4j:1.6.6 - http://www.slf4j.org)
-        * Log4j Implemented Over SLF4J (org.slf4j:log4j-over-slf4j:1.7.25 - http://www.slf4j.org)
-    
+        * Plexus Common Utilities (org.codehaus.plexus:plexus-utils:3.1.0 - http://codehaus-plexus.github.io/plexus-utils/)
+        * Maven Artifact (org.apache.maven:maven-artifact:3.6.0 - https://maven.apache.org/ref/3.6.0/maven-artifact/)
+        * Maven Builder Support (org.apache.maven:maven-builder-support:3.6.0 - https://maven.apache.org/ref/3.6.0/maven-builder-support/)
+        * Maven Artifact Resolver Transport File (org.apache.maven.resolver:maven-resolver-transport-file:1.3.3 - https://maven.apache.org/resolver/maven-resolver-transport-file/)
+        * Maven Artifact Resolver Transport HTTP (org.apache.maven.resolver:maven-resolver-transport-http:1.3.3 - https://maven.apache.org/resolver/maven-resolver-transport-http/)
+        * Maven Repository Metadata Model (org.apache.maven:maven-repository-metadata:3.6.0 - https://maven.apache.org/ref/3.6.0/maven-repository-metadata/)
+        * Maven Artifact Resolver Provider (org.apache.maven:maven-resolver-provider:3.6.0 - https://maven.apache.org/ref/3.6.0/maven-resolver-provider/)
+        * zookeeper (org.apache.zookeeper:zookeeper:3.4.14 - no url defined)
+        * Maven Model (org.apache.maven:maven-model:3.6.0 - https://maven.apache.org/ref/3.6.0/maven-model/)
+        * Netty/All-in-One (io.netty:netty-all:4.1.30.Final - http://netty.io/netty-all/)
+        * Maven Artifact Resolver API (org.apache.maven.resolver:maven-resolver-api:1.3.3 - https://maven.apache.org/resolver/maven-resolver-api/)
+        * Maven Artifact Resolver SPI (org.apache.maven.resolver:maven-resolver-spi:1.3.3 - https://maven.apache.org/resolver/maven-resolver-spi/)
+        * Maven Artifact Resolver Implementation (org.apache.maven.resolver:maven-resolver-impl:1.3.3 - https://maven.apache.org/resolver/maven-resolver-impl/)
+        * Maven Model Builder (org.apache.maven:maven-model-builder:3.6.0 - https://maven.apache.org/ref/3.6.0/maven-model-builder/)
+        * Plexus :: Component Annotations (org.codehaus.plexus:plexus-component-annotations:1.7.1 - http://codehaus-plexus.github.io/plexus-containers/plexus-component-annotations/)
+        * Maven Artifact Resolver Utilities (org.apache.maven.resolver:maven-resolver-util:1.3.3 - https://maven.apache.org/resolver/maven-resolver-util/)
+        * Maven Artifact Resolver Connector Basic (org.apache.maven.resolver:maven-resolver-connector-basic:1.3.3 - https://maven.apache.org/resolver/maven-resolver-connector-basic/)
+        * Netty (io.netty:netty:3.10.6.Final - http://netty.io/)
+        * Curator Recipes (org.apache.curator:curator-recipes:4.2.0 - http://curator.apache.org/curator-recipes)
         * Aggregate Designer Algorithm (net.hydromatic:aggdesigner-algorithm:6.0 - http://github.com/julianhyde/aggdesigner/aggdesigner-algorithm)
         * aircompressor (io.airlift:aircompressor:0.3 - http://github.com/airlift/aircompressor)
         * Annotation 1.0 (org.apache.geronimo.specs:geronimo-annotation_1.0_spec:1.1.1 - http://geronimo.apache.org/specs/geronimo-annotation_1.0_spec)
@@ -664,7 +680,6 @@
         * Apache Calcite Avatica (org.apache.calcite.avatica:avatica-core:1.10.0 - https://calcite.apache.org/avatica/avatica-core)
         * Apache Calcite Avatica Metrics (org.apache.calcite.avatica:avatica-metrics:1.8.0 - http://calcite.apache.org/avatica/avatica-metrics)
         * Apache Commons CLI (commons-cli:commons-cli:1.4 - http://commons.apache.org/proper/commons-cli/)
-        * Apache Commons Codec (commons-codec:commons-codec:1.10 - http://commons.apache.org/proper/commons-codec/)
         * Apache Commons Codec (commons-codec:commons-codec:1.11 - http://commons.apache.org/proper/commons-codec/)
         * Apache Commons Collections (commons-collections:commons-collections:3.2.2 - http://commons.apache.org/collections/)
         * Apache Commons Compress (org.apache.commons:commons-compress:1.18 - https://commons.apache.org/proper/commons-compress/)
@@ -672,11 +687,11 @@
         * Apache Commons CSV (org.apache.commons:commons-csv:1.4 - http://commons.apache.org/proper/commons-csv/)
         * Apache Commons Exec (org.apache.commons:commons-exec:1.3 - http://commons.apache.org/proper/commons-exec/)
         * Apache Commons FileUpload (commons-fileupload:commons-fileupload:1.3.3 - http://commons.apache.org/proper/commons-fileupload/)
-        * Apache Commons IO (commons-io:commons-io:2.5 - http://commons.apache.org/proper/commons-io/)
         * Apache Commons IO (commons-io:commons-io:2.6 - http://commons.apache.org/proper/commons-io/)
         * Apache Commons Lang (org.apache.commons:commons-lang3:3.2 - http://commons.apache.org/proper/commons-lang/)
         * Apache Commons Lang (org.apache.commons:commons-lang3:3.6 - http://commons.apache.org/proper/commons-lang/)
         * Apache Commons Lang (org.apache.commons:commons-lang3:3.7 - http://commons.apache.org/proper/commons-lang/)
+        * Apache Commons Lang (org.apache.commons:commons-lang3:3.8.1 - http://commons.apache.org/proper/commons-lang/)
         * Apache Commons Logging (commons-logging:commons-logging:1.2 - http://commons.apache.org/proper/commons-logging/)
         * Apache Commons Math (org.apache.commons:commons-math3:3.6.1 - http://commons.apache.org/proper/commons-math/)
         * Apache Commons Text (org.apache.commons:commons-text:1.2 - http://commons.apache.org/proper/commons-text/)
@@ -687,13 +702,12 @@
         * ApacheDS I18n (org.apache.directory.server:apacheds-i18n:2.0.0-M15 - http://directory.apache.org/apacheds/1.5/apacheds-i18n)
         * ApacheDS Protocol Kerberos Codec (org.apache.directory.server:apacheds-kerberos-codec:2.0.0-M15 - http://directory.apache.org/apacheds/1.5/apacheds-kerberos-codec)
         * Apache Groovy (org.codehaus.groovy:groovy-all:2.4.4 - http://groovy-lang.org)
+        * Apache Hadoop Archives (org.apache.hadoop:hadoop-archives:2.8.5 - no url defined)
         * Apache Hadoop Annotations (org.apache.hadoop:hadoop-annotations:2.8.5 - no url defined)
-        * Apache Hadoop Archives (org.apache.hadoop:hadoop-archives:2.7.2 - no url defined)
-        * Apache Hadoop Auth (org.apache.hadoop:hadoop-auth:2.7.7 - no url defined)
         * Apache Hadoop Auth (org.apache.hadoop:hadoop-auth:2.8.5 - no url defined)
         * Apache Hadoop Client (org.apache.hadoop:hadoop-client:2.8.5 - no url defined)
         * Apache Hadoop Common (org.apache.hadoop:hadoop-common:2.8.5 - no url defined)
-        * Apache Hadoop HDFS (org.apache.hadoop:hadoop-hdfs:2.7.7 - no url defined)
+        * Apache Hadoop HDFS (org.apache.hadoop:hadoop-hdfs:2.8.5 - no url defined)
         * Apache Hadoop HDFS Client (org.apache.hadoop:hadoop-hdfs-client:2.8.5 - no url defined)
         * Apache Hadoop MapReduce App (org.apache.hadoop:hadoop-mapreduce-client-app:2.8.5 - no url defined)
         * Apache Hadoop MapReduce Common (org.apache.hadoop:hadoop-mapreduce-client-common:2.8.5 - no url defined)
@@ -704,6 +718,10 @@
         * Apache Hadoop YARN Client (org.apache.hadoop:hadoop-yarn-client:2.8.5 - no url defined)
         * Apache Hadoop YARN Common (org.apache.hadoop:hadoop-yarn-common:2.8.5 - no url defined)
         * Apache Hadoop YARN Server Common (org.apache.hadoop:hadoop-yarn-server-common:2.8.5 - no url defined)
+        * Apache Hadoop YARN ApplicationHistoryService (org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice:2.8.5 - no url defined)
+        * Apache Hadoop YARN Registry (org.apache.hadoop:hadoop-yarn-registry:2.8.5 - no url defined)
+        * Apache Hadoop YARN ResourceManager (org.apache.hadoop:hadoop-yarn-server-resourcemanager:2.8.5 - no url defined)
+        * Apache Hadoop YARN Web Proxy (org.apache.hadoop:hadoop-yarn-server-web-proxy:2.8.5 - no url defined)
         * Apache HBase - Annotations (org.apache.hbase:hbase-annotations:2.1.3 - http://hbase.apache.org/hbase-annotations)
         * Apache HBase - Client (org.apache.hbase:hbase-client:2.1.3 - http://hbase.apache.org/hbase-build-configuration/hbase-client)
         * Apache HBase - Common (org.apache.hbase:hbase-common:2.1.3 - http://hbase.apache.org/hbase-build-configuration/hbase-common)
@@ -721,25 +739,20 @@
         * Apache HBase - Server (org.apache.hbase:hbase-server:2.1.3 - http://hbase.apache.org/hbase-build-configuration/hbase-server)
         * Apache HBase - Shaded Protocol (org.apache.hbase:hbase-protocol-shaded:2.1.3 - http://hbase.apache.org/hbase-build-configuration/hbase-protocol-shaded)
         * Apache HBase - Zookeeper (org.apache.hbase:hbase-zookeeper:2.1.3 - http://hbase.apache.org/hbase-build-configuration/hbase-zookeeper)
-        * Apache HttpClient (org.apache.httpcomponents:httpclient:4.3.5 - http://hc.apache.org/httpcomponents-client)
-        * Apache HttpClient (org.apache.httpcomponents:httpclient:4.5.2 - http://hc.apache.org/httpcomponents-client)
         * Apache HttpClient (org.apache.httpcomponents:httpclient:4.5.6 - http://hc.apache.org/httpcomponents-client)
-        * Apache HttpCore (org.apache.httpcomponents:httpcore:4.3.2 - http://hc.apache.org/httpcomponents-core-ga)
         * Apache HttpCore (org.apache.httpcomponents:httpcore:4.4.10 - http://hc.apache.org/httpcomponents-core-ga)
-        * Apache HttpCore (org.apache.httpcomponents:httpcore:4.4.4 - http://hc.apache.org/httpcomponents-core-ga)
         * Apache Ivy (org.apache.ivy:ivy:2.4.0 - http://ant.apache.org/ivy/)
         * Apache Kafka (org.apache.kafka:kafka-clients:0.11.0.3 - http://kafka.apache.org)
         * Apache Log4j (log4j:log4j:1.2.17 - http://logging.apache.org/log4j/1.2/)
-        * Apache Log4j 1.x Compatibility API (org.apache.logging.log4j:log4j-1.2-api:2.6.2 - http://logging.apache.org/log4j/2.x/log4j-1.2-api/)
-        * Apache Log4j API (org.apache.logging.log4j:log4j-api:2.11.1 - https://logging.apache.org/log4j/2.x/log4j-api/)
-        * Apache Log4j API (org.apache.logging.log4j:log4j-api:2.6.2 - http://logging.apache.org/log4j/2.x/log4j-api/)
-        * Apache Log4j Core (org.apache.logging.log4j:log4j-core:2.11.1 - https://logging.apache.org/log4j/2.x/log4j-core/)
-        * Apache Log4j Core (org.apache.logging.log4j:log4j-core:2.6.2 - http://logging.apache.org/log4j/2.x/log4j-core/)
-        * Apache Log4j SLF4J Binding (org.apache.logging.log4j:log4j-slf4j-impl:2.11.1 - https://logging.apache.org/log4j/2.x/log4j-slf4j-impl/)
-        * Apache Log4j Web (org.apache.logging.log4j:log4j-web:2.6.2 - http://logging.apache.org/log4j/2.x/log4j-web/)
+        * Apache Log4j 1.x Compatibility API (org.apache.logging.log4j:log4j-1.2-api:2.11.2 - https://logging.apache.org/log4j/2.x/log4j-1.2-api/)
+        * Apache Log4j API (org.apache.logging.log4j:log4j-api:2.11.2 - https://logging.apache.org/log4j/2.x/log4j-api/)
+        * Apache Log4j Core (org.apache.logging.log4j:log4j-core:2.11.2 - https://logging.apache.org/log4j/2.x/log4j-core/)
+        * Apache Log4j SLF4J Binding (org.apache.logging.log4j:log4j-slf4j-impl:2.11.2 - https://logging.apache.org/log4j/2.x/log4j-slf4j-impl/)
+        * Apache Log4j Web (org.apache.logging.log4j:log4j-web:2.11.2 - https://logging.apache.org/log4j/2.x/log4j-web/)
         * Apache Parquet Hadoop Bundle (org.apache.parquet:parquet-hadoop-bundle:1.8.1 - https://parquet.apache.org)
         * Apache Thrift (org.apache.thrift:libfb303:0.9.3 - http://thrift.apache.org)
-        * Apache Thrift (org.apache.thrift:libthrift:0.9.3 - http://thrift.apache.org)
+        * Apache Thrift (org.apache.thrift:libthrift:0.12.0 - http://thrift.apache.org)
+        * Plexus Interpolation API (org.codehaus.plexus:plexus-interpolation:1.25 - http://codehaus-plexus.github.io/plexus-interpolation/)
         * Apache Twill API (org.apache.twill:twill-api:0.6.0-incubating - http://twill.incubator.apache.org/twill-api)
         * Apache Twill common library (org.apache.twill:twill-common:0.6.0-incubating - http://twill.incubator.apache.org/twill-common)
         * Apache Twill core library (org.apache.twill:twill-core:0.6.0-incubating - http://twill.incubator.apache.org/twill-core)
@@ -749,20 +762,15 @@
         * Apache Velocity (org.apache.velocity:velocity:1.5 - http://velocity.apache.org/engine/releases/velocity-1.5/)
         * Apache Yetus - Audience Annotations (org.apache.yetus:audience-annotations:0.5.0 - https://yetus.apache.org/audience-annotations)
         * ASM based accessors helper used by json-smart (net.minidev:accessors-smart:1.2 - http://www.minidev.net/)
-        * Bean Validation API (javax.validation:validation-api:1.1.0.Final - http://beanvalidation.org)
         * BoneCP :: Core Library (com.jolbox:bonecp:0.8.0.RELEASE - http://jolbox.com/bonecp)
         * Calcite Core (org.apache.calcite:calcite-core:1.14.0 - https://calcite.apache.org/calcite-core)
         * Calcite Druid (org.apache.calcite:calcite-druid:1.10.0 - http://calcite.apache.org/calcite-druid)
         * Calcite Linq4j (org.apache.calcite:calcite-linq4j:1.10.0 - http://calcite.apache.org/calcite-linq4j)
         * Calcite Linq4j (org.apache.calcite:calcite-linq4j:1.14.0 - https://calcite.apache.org/calcite-linq4j)
         * carbonite (com.twitter:carbonite:1.5.0 - no url defined)
-        * CDI APIs (javax.enterprise:cdi-api:1.0 - http://www.seamframework.org/Weld/cdi-api)
         * chill-java (com.twitter:chill-java:0.8.0 - https://github.com/twitter/chill)
         * ClassMate (com.fasterxml:classmate:1.3.1 - http://github.com/cowtowncoder/java-classmate)
         * com.papertrail:profiler (com.papertrail:profiler:1.0.2 - https://github.com/papertrail/profiler)
-        * Commons CLI (commons-cli:commons-cli:1.2 - http://commons.apache.org/cli/)
-        * Commons Codec (commons-codec:commons-codec:1.6 - http://commons.apache.org/codec/)
-        * Commons Compress (org.apache.commons:commons-compress:1.4.1 - http://commons.apache.org/compress/)
         * Commons Configuration (commons-configuration:commons-configuration:1.6 - http://commons.apache.org/${pom.artifactId.substring(8)}/)
         * Commons Daemon (commons-daemon:commons-daemon:1.0.13 - http://commons.apache.org/daemon/)
         * Commons DBCP (commons-dbcp:commons-dbcp:1.4 - http://commons.apache.org/dbcp/)
@@ -770,11 +778,8 @@
         * Commons Logging (commons-logging:commons-logging:1.1.3 - http://commons.apache.org/proper/commons-logging/)
         * Commons Net (commons-net:commons-net:3.1 - http://commons.apache.org/net/)
         * Commons Pool (commons-pool:commons-pool:1.5.4 - http://commons.apache.org/pool/)
-        * Curator Client (org.apache.curator:curator-client:2.7.1 - http://curator.apache.org/curator-client)
         * Curator Client (org.apache.curator:curator-client:4.2.0 - http://curator.apache.org/curator-client)
-        * Curator Framework (org.apache.curator:curator-framework:2.7.1 - http://curator.apache.org/curator-framework)
         * Curator Framework (org.apache.curator:curator-framework:4.2.0 - http://curator.apache.org/curator-framework)
-        * Curator Recipes (org.apache.curator:curator-recipes:2.7.1 - http://curator.apache.org/curator-recipes)
         * Data Mapper for Jackson (org.codehaus.jackson:jackson-mapper-asl:1.9.13 - http://jackson.codehaus.org)
         * DataNucleus Core (org.datanucleus:datanucleus-core:4.1.17 - http://www.datanucleus.org/#/datanucleus-core)
         * DataNucleus JDO API plugin (org.datanucleus:datanucleus-api-jdo:4.2.4 - http://www.datanucleus.org/#/datanucleus-api-jdo)
@@ -807,16 +812,11 @@
         * Google Guice - Extensions - Servlet (com.google.inject.extensions:guice-servlet:3.0 - http://code.google.com/p/google-guice/extensions-parent/guice-servlet/)
         * Graphite Integration for Metrics (io.dropwizard.metrics:metrics-graphite:3.2.6 - http://metrics.dropwizard.io/metrics-graphite/)
         * Gson (com.google.code.gson:gson:2.2.4 - http://code.google.com/p/google-gson/)
-        * Guava: Google Core Libraries for Java (com.google.guava:guava:11.0.2 - http://code.google.com/p/guava-libraries/guava)
         * Guava: Google Core Libraries for Java (com.google.guava:guava:16.0.1 - http://code.google.com/p/guava-libraries/guava)
         * Guava: Google Core Libraries for Java (com.google.guava:guava:27.0.1-jre - https://github.com/google/guava/guava)
         * Guava InternalFutureFailureAccess and InternalFutures (com.google.guava:failureaccess:1.0.1 - https://github.com/google/guava/failureaccess)
         * Guava ListenableFuture only (com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava - https://github.com/google/guava/listenablefuture)
         * Hadoop Metrics2 Reporter for Dropwizard Metrics (com.github.joshelser:dropwizard-metrics-hadoop-metrics2-reporter:0.1.2 - https://github.com/joshelser/dropwizard-hadoop-metrics2)
-        * hadoop-yarn-registry (org.apache.hadoop:hadoop-yarn-registry:2.7.1 - no url defined)
-        * hadoop-yarn-server-applicationhistoryservice (org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice:2.7.2 - no url defined)
-        * hadoop-yarn-server-resourcemanager (org.apache.hadoop:hadoop-yarn-server-resourcemanager:2.7.2 - no url defined)
-        * hadoop-yarn-server-web-proxy (org.apache.hadoop:hadoop-yarn-server-web-proxy:2.7.2 - no url defined)
         * Hibernate Validator Engine (org.hibernate:hibernate-validator:5.4.2.Final - http://hibernate.org/validator/hibernate-validator)
         * HikariCP (com.zaxxer:HikariCP:2.5.1 - https://github.com/brettwooldridge/HikariCP)
         * Hive CLI (org.apache.hive:hive-cli:2.3.4 - http://hive.apache.org/hive-cli)
@@ -840,30 +840,26 @@
         * Hive Shims Scheduler (org.apache.hive.shims:hive-shims-scheduler:2.3.4 - http://hive.apache.org/hive-shims-scheduler)
         * Hive Storage API (org.apache.hive:hive-storage-api:2.4.0 - https://www.apache.org/hive-storage-api/)
         * Hive Vector-Code-Gen Utilities (org.apache.hive:hive-vector-code-gen:2.3.4 - http://hive.apache.org/hive-vector-code-gen)
-        * htrace-core (org.apache.htrace:htrace-core:3.1.0-incubating - http://incubator.apache.org/projects/htrace.html)
         * htrace-core4 (org.apache.htrace:htrace-core4:4.2.0-incubating - http://incubator.apache.org/projects/htrace.html)
         * j2html (com.j2html:j2html:1.0.0 - http://j2html.com)
         * J2ObjC Annotations (com.google.j2objc:j2objc-annotations:1.1 - https://github.com/google/j2objc/)
         * Jackson (org.codehaus.jackson:jackson-core-asl:1.9.13 - http://jackson.codehaus.org)
         * Jackson-annotations (com.fasterxml.jackson.core:jackson-annotations:2.9.0 - http://github.com/FasterXML/jackson)
-        * Jackson-annotations (com.fasterxml.jackson.core:jackson-annotations:2.9.2 - http://github.com/FasterXML/jackson)
-        * Jackson-core (com.fasterxml.jackson.core:jackson-core:2.9.2 - https://github.com/FasterXML/jackson-core)
         * Jackson-core (com.fasterxml.jackson.core:jackson-core:2.9.8 - https://github.com/FasterXML/jackson-core)
-        * jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.9.2 - http://github.com/FasterXML/jackson)
         * jackson-databind (com.fasterxml.jackson.core:jackson-databind:2.9.8 - http://github.com/FasterXML/jackson)
         * Jackson dataformat: Smile (com.fasterxml.jackson.dataformat:jackson-dataformat-smile:2.9.8 - http://github.com/FasterXML/jackson-dataformats-binary)
-        * Jackson-dataformat-YAML (com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.9.6 - https://github.com/FasterXML/jackson-dataformats-text)
-        * Jackson datatype: Guava (com.fasterxml.jackson.datatype:jackson-datatype-guava:2.9.6 - https://github.com/FasterXML/jackson-datatypes-collections)
-        * Jackson datatype: jdk8 (com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.9.6 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jdk8)
-        * Jackson datatype: JSR310 (com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.9.6 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jsr310)
-        * Jackson-datatype-Joda (com.fasterxml.jackson.datatype:jackson-datatype-joda:2.9.6 - http://wiki.fasterxml.com/JacksonModuleJoda)
+        * Jackson-dataformat-YAML (com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:2.9.8 - https://github.com/FasterXML/jackson-dataformats-text)
+        * Jackson datatype: Guava (com.fasterxml.jackson.datatype:jackson-datatype-guava:2.9.8 - https://github.com/FasterXML/jackson-datatypes-collections)
+        * Jackson datatype: jdk8 (com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.9.8 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jdk8)
+        * Jackson datatype: JSR310 (com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.9.8 - https://github.com/FasterXML/jackson-modules-java8/jackson-datatype-jsr310)
+        * Jackson-datatype-Joda (com.fasterxml.jackson.datatype:jackson-datatype-joda:2.9.8 - http://wiki.fasterxml.com/JacksonModuleJoda)
         * Jackson Integration for Metrics (io.dropwizard.metrics:metrics-json:3.1.0 - http://metrics.codahale.com/metrics-json/)
         * Jackson Integration for Metrics (io.dropwizard.metrics:metrics-json:4.0.2 - http://metrics.dropwizard.io/metrics-json)
-        * Jackson-JAXRS-base (com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.9.6 - http://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-base)
-        * Jackson-JAXRS-JSON (com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.9.6 - http://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-json-provider)
-        * Jackson module: Afterburner (com.fasterxml.jackson.module:jackson-module-afterburner:2.9.6 - https://github.com/FasterXML/jackson-modules-base)
-        * Jackson module: JAXB Annotations (com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.9.6 - https://github.com/FasterXML/jackson-modules-base)
-        * Jackson-module-parameter-names (com.fasterxml.jackson.module:jackson-module-parameter-names:2.9.6 - https://github.com/FasterXML/jackson-modules-java8/jackson-module-parameter-names)
+        * Jackson-JAXRS-base (com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.9.8 - http://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-base)
+        * Jackson-JAXRS-JSON (com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.9.8 - http://github.com/FasterXML/jackson-jaxrs-providers/jackson-jaxrs-json-provider)
+        * Jackson module: Afterburner (com.fasterxml.jackson.module:jackson-module-afterburner:2.9.8 - https://github.com/FasterXML/jackson-modules-base)
+        * Jackson module: JAXB Annotations (com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.9.8 - https://github.com/FasterXML/jackson-modules-base)
+        * Jackson-module-parameter-names (com.fasterxml.jackson.module:jackson-module-parameter-names:2.9.8 - https://github.com/FasterXML/jackson-modules-java8/jackson-module-parameter-names)
         * jasper-compiler (tomcat:jasper-compiler:5.5.23 - http://tomcat.apache.org/jasper-compiler)
         * jasper-runtime (tomcat:jasper-runtime:5.5.23 - http://tomcat.apache.org/jasper-runtime)
         * Java Authentication SPI for Containers (org.apache.geronimo.specs:geronimo-jaspic_1.0_spec:1.0 - http://geronimo.apache.org/maven/specs/geronimo-jaspic_1.0_spec/1.0)
@@ -876,8 +872,6 @@
         * Jettison (org.codehaus.jettison:jettison:1.1 - no url defined)
         * Jetty SSLEngine (org.mortbay.jetty:jetty-sslengine:6.1.26 - http://jetty.mortbay.org)
         * Joda-Time (joda-time:joda-time:2.3 - http://www.joda.org/joda-time/)
-        * Joda-Time (joda-time:joda-time:2.8.1 - http://www.joda.org/joda-time/)
-        * Joda-Time (joda-time:joda-time:2.9.9 - http://www.joda.org/joda-time/)
         * jOOL (org.jooq:jool:0.9.12 - http://nexus.sonatype.org/oss-repository-hosting.html/jool)
         * JPam (net.sf.jpam:jpam:1.1 - http://jpam.sf.net)
         * JSON.simple (com.googlecode.json-simple:json-simple:1.1 - http://code.google.com/p/json-simple/)
@@ -886,11 +880,6 @@
         * JVM Integration for Metrics (io.dropwizard.metrics:metrics-jvm:3.1.0 - http://metrics.codahale.com/metrics-jvm/)
         * JVM Integration for Metrics (io.dropwizard.metrics:metrics-jvm:4.0.2 - http://metrics.dropwizard.io/metrics-jvm)
         * LZ4 and xxHash (net.jpountz.lz4:lz4:1.3.0 - https://github.com/jpountz/lz4-java)
-        * Maven Aether Provider (org.apache.maven:maven-aether-provider:3.1.0 - http://maven.apache.org/ref/3.1.0/maven-aether-provider)
-        * Maven Model (org.apache.maven:maven-model:3.1.0 - http://maven.apache.org/ref/3.1.0/maven-model)
-        * Maven Model Builder (org.apache.maven:maven-model-builder:3.1.0 - http://maven.apache.org/ref/3.1.0/maven-model-builder)
-        * Maven Repository Metadata Model (org.apache.maven:maven-repository-metadata:3.1.0 - http://maven.apache.org/ref/3.1.0/maven-repository-metadata)
-        * Metrics Core (io.dropwizard.metrics:metrics-core:3.2.1 - http://metrics.codahale.com/metrics-core/)
         * Metrics Core (io.dropwizard.metrics:metrics-core:3.2.6 - http://metrics.dropwizard.io/metrics-core/)
         * Metrics Health Checks (io.dropwizard.metrics:metrics-healthchecks:4.0.2 - http://metrics.dropwizard.io/metrics-healthchecks)
         * Metrics Integration for Jersey 2.x (io.dropwizard.metrics:metrics-jersey2:4.0.2 - http://metrics.dropwizard.io/metrics-jersey2)
@@ -898,7 +887,6 @@
         * Metrics Integration for Logback (io.dropwizard.metrics:metrics-logback:4.0.2 - http://metrics.dropwizard.io/metrics-logback)
         * Metrics Integration with JMX (io.dropwizard.metrics:metrics-jmx:4.0.2 - http://metrics.dropwizard.io/metrics-jmx)
         * Metrics Utility Servlets (io.dropwizard.metrics:metrics-servlets:4.0.2 - http://metrics.dropwizard.io/metrics-servlets)
-        * Netty/All-in-One (io.netty:netty-all:4.0.23.Final - http://netty.io/netty-all/)
         * Nimbus JOSE+JWT (com.nimbusds:nimbus-jose-jwt:4.41.1 - https://bitbucket.org/connect2id/nimbus-jose-jwt)
         * Objenesis (org.objenesis:objenesis:2.1 - http://objenesis.org)
         * OkHttp (com.squareup.okhttp:okhttp:2.4.0 - https://github.com/square/okhttp/okhttp)
@@ -907,81 +895,61 @@
         * Open JSON (com.tdunning:json:1.8 - https://github.com/tdunning/open-json)
         * ORC Core (org.apache.orc:orc-core:1.3.3 - http://orc.apache.org/orc-core)
         * oro (oro:oro:2.0.8 - no url defined)
-        * Plexus :: Component Annotations (org.codehaus.plexus:plexus-component-annotations:1.5.5 - http://plexus.codehaus.org/plexus-containers/plexus-component-annotations/)
-        * Plexus Classworlds (org.codehaus.plexus:plexus-classworlds:2.4 - http://plexus.codehaus.org/plexus-classworlds/)
-        * Plexus Common Utilities (org.codehaus.plexus:plexus-utils:3.1.1 - http://codehaus-plexus.github.io/plexus-utils/)
-        * Plexus Interpolation API (org.codehaus.plexus:plexus-interpolation:1.16 - http://plexus.codehaus.org/plexus-components/plexus-interpolation)
-        * Sisu Guice - Core Library (org.sonatype.sisu:sisu-guice:3.1.0 - http://code.google.com/p/google-guice/sisu-guice/)
         * Slider Core (org.apache.slider:slider-core:0.90.2-incubating - http://slider.incubator.apache.org/slider-core/)
         * SnakeYAML (org.yaml:snakeyaml:1.11 - http://www.snakeyaml.org)
-        * SnakeYAML (org.yaml:snakeyaml:1.18 - http://www.snakeyaml.org)
         * Snappy for Java (org.xerial.snappy:snappy-java:1.0.5 - http://github.com/xerial/snappy-java/)
         * snappy-java (org.xerial.snappy:snappy-java:1.1.2.6 - https://github.com/xerial/snappy-java)
         * StAX API (stax:stax-api:1.0.1 - http://stax.codehaus.org/)
         * Tephra API (co.cask.tephra:tephra-api:0.6.0 - https://github.com/caskdata/tephra/tephra-api)
         * Tephra Core (co.cask.tephra:tephra-core:0.6.0 - https://github.com/caskdata/tephra/tephra-core)
         * Tephra HBase 1.0 Compatibility (co.cask.tephra:tephra-hbase-compat-1.0:0.6.0 - https://github.com/caskdata/tephra/tephra-hbase-compat-1.0)
-        * The Netty Project (io.netty:netty:3.7.0.Final - http://netty.io/)
         * zookeeper (org.apache.zookeeper:zookeeper:3.4.6 - no url defined)
-        
-        * jersey-container-grizzly2-http (org.glassfish.jersey.containers:jersey-container-grizzly2-http:2.27 - https://jersey.github.io/project/jersey-container-grizzly2-http/)
-        * jersey-container-grizzly2-servlet (org.glassfish.jersey.containers:jersey-container-grizzly2-servlet:2.27 - https://jersey.github.io/project/jersey-container-grizzly2-servlet/)
-        * jersey-container-servlet (org.glassfish.jersey.containers:jersey-container-servlet:2.27 - https://jersey.github.io/project/jersey-container-servlet/)
-        * jersey-container-servlet-core (org.glassfish.jersey.containers:jersey-container-servlet-core:2.27 - https://jersey.github.io/project/jersey-container-servlet-core/)
-        * jersey-core-client (org.glassfish.jersey.core:jersey-client:2.27 - https://jersey.github.io/jersey-client/)
-        * jersey-inject-hk2 (org.glassfish.jersey.inject:jersey-hk2:2.27 - https://jersey.github.io/project/jersey-hk2/)
-        * jersey-media-jaxb (org.glassfish.jersey.media:jersey-media-jaxb:2.27 - https://jersey.github.io/project/jersey-media-jaxb/)
-        * jersey-core-server (org.glassfish.jersey.core:jersey-server:2.27 - https://jersey.github.io/jersey-server/)
-        * jersey-core-common (org.glassfish.jersey.core:jersey-common:2.27 - https://jersey.github.io/jersey-common/)
+        * Log4j Implemented Over SLF4J (org.slf4j:log4j-over-slf4j:1.7.26 - http://www.slf4j.org)
         * Jetty :: Aggregate :: All core Jetty (org.eclipse.jetty.aggregate:jetty-all:7.6.0.v20120127 - http://www.eclipse.org/jetty/jetty-aggregate-project/jetty-all)
-        * Jetty :: Continuation (org.eclipse.jetty:jetty-continuation:9.4.11.v20180605 - http://www.eclipse.org/jetty)
         * Jetty :: Continuation (org.eclipse.jetty:jetty-continuation:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: Http Utility (org.eclipse.jetty:jetty-http:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Http Utility (org.eclipse.jetty:jetty-http:9.4.11.v20180605 - http://www.eclipse.org/jetty)
         * Jetty :: Http Utility (org.eclipse.jetty:jetty-http:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: IO Utility (org.eclipse.jetty:jetty-io:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: IO Utility (org.eclipse.jetty:jetty-io:9.4.11.v20180605 - http://www.eclipse.org/jetty)
-        * Jetty :: IO Utility (org.eclipse.jetty:jetty-io:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: Security (org.eclipse.jetty:jetty-security:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Security (org.eclipse.jetty:jetty-security:9.4.11.v20180605 - http://www.eclipse.org/jetty)
+        * Jetty :: IO Utility (org.eclipse.jetty:jetty-io:9.4.14.v20181114 - http://www.eclipse.org/jetty)
         * Jetty :: Security (org.eclipse.jetty:jetty-security:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: Server Core (org.eclipse.jetty:jetty-server:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Server Core (org.eclipse.jetty:jetty-server:9.4.11.v20180605 - http://www.eclipse.org/jetty)
         * Jetty :: Server Core (org.eclipse.jetty:jetty-server:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: Servlet Handling (org.eclipse.jetty:jetty-servlet:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Servlet Handling (org.eclipse.jetty:jetty-servlet:9.4.11.v20180605 - http://www.eclipse.org/jetty)
         * Jetty :: Servlet Handling (org.eclipse.jetty:jetty-servlet:9.4.14.v20181114 - http://www.eclipse.org/jetty)
         * Jetty :: SetUID Java (org.eclipse.jetty.toolchain.setuid:jetty-setuid-java:1.0.3 - http://www.eclipse.org/jetty/jetty-toolchain/jetty-setuid-parent/jetty-setuid-java)
-        * Jetty :: Utilities :: Ajax(JSON) (org.eclipse.jetty:jetty-util-ajax:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Utilities (org.eclipse.jetty:jetty-util:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Utilities (org.eclipse.jetty:jetty-util:9.4.11.v20180605 - http://www.eclipse.org/jetty)
         * Jetty :: Utilities (org.eclipse.jetty:jetty-util:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: Utility Servlets and Filters (org.eclipse.jetty:jetty-servlets:9.4.11.v20180605 - http://www.eclipse.org/jetty)
+        * Jetty :: Utilities :: Ajax(JSON) (org.eclipse.jetty:jetty-util-ajax:9.4.14.v20181114 - http://www.eclipse.org/jetty)
         * Jetty :: Utility Servlets and Filters (org.eclipse.jetty:jetty-servlets:9.4.14.v20181114 - http://www.eclipse.org/jetty)
-        * Jetty :: Webapp Application Support (org.eclipse.jetty:jetty-webapp:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: Webapp Application Support (org.eclipse.jetty:jetty-webapp:9.4.11.v20180605 - http://www.eclipse.org/jetty)
-        * Jetty :: XML utilities (org.eclipse.jetty:jetty-xml:9.3.25.v20180904 - http://www.eclipse.org/jetty)
-        * Jetty :: XML utilities (org.eclipse.jetty:jetty-xml:9.4.11.v20180605 - http://www.eclipse.org/jetty)
+        * Jetty :: Webapp Application Support (org.eclipse.jetty:jetty-webapp:9.4.14.v20181114 - http://www.eclipse.org/jetty)
+        * Jetty :: XML utilities (org.eclipse.jetty:jetty-xml:9.4.14.v20181114 - http://www.eclipse.org/jetty)
         * Jetty Orbit :: Servlet API (org.eclipse.jetty.orbit:javax.servlet:3.0.0.v201112011016 - http://www.eclipse.org/jetty/jetty-orbit/javax.servlet)
         * Jetty Server (org.mortbay.jetty:jetty:6.1.26 - http://www.eclipse.org/jetty/jetty-parent/project/modules/jetty)
         * Jetty Utilities (org.mortbay.jetty:jetty-util:6.1.26 - http://www.eclipse.org/jetty/jetty-parent/project/jetty-util)
-        * RocksDB JNI (org.rocksdb:rocksdbjni:5.8.6 - http://rocksdb.org/)
         * JAX-RS provider for JSON content type (org.codehaus.jackson:jackson-jaxrs:1.9.13 - http://jackson.codehaus.org)
         * Xml Compatibility extensions for Jackson (org.codehaus.jackson:jackson-xc:1.9.13 - http://jackson.codehaus.org)
-        * Javassist (org.javassist:javassist:3.20.0-GA - http://www.javassist.org/)
         * Javassist (org.javassist:javassist:3.22.0-GA - http://www.javassist.org/)
-        
-        * JCTools Core (org.jctools:jctools-core:jar:2.0.1 - http://jctools.github.io/JCTools/)
+        * RocksDB JNI (org.rocksdb:rocksdbjni:5.18.3 - http://rocksdb.org/)
+        * JCTools Core (org.jctools:jctools-core:2.0.1 - http://jctools.github.io/JCTools/)
+        * Bean Validation API (javax.validation:validation-api:2.0.1.Final - http://beanvalidation.org)
+        * jersey-container-grizzly2-http (org.glassfish.jersey.containers:jersey-container-grizzly2-http:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-grizzly2-http)
+        * jersey-container-grizzly2-servlet (org.glassfish.jersey.containers:jersey-container-grizzly2-servlet:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-grizzly2-servlet)
+        * jersey-container-servlet (org.glassfish.jersey.containers:jersey-container-servlet:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-servlet)
+        * jersey-container-servlet-core (org.glassfish.jersey.containers:jersey-container-servlet-core:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-container-servlet-core)
+        * jersey-core-client (org.glassfish.jersey.core:jersey-client:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/jersey-client)
+        * jersey-ext-bean-validation (org.glassfish.jersey.ext:jersey-bean-validation:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-bean-validation)
+        * jersey-ext-metainf-services (org.glassfish.jersey.ext:jersey-metainf-services:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-metainf-services)
+        * jersey-inject-hk2 (org.glassfish.jersey.inject:jersey-hk2:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-hk2)
+        * jersey-media-jaxb (org.glassfish.jersey.media:jersey-media-jaxb:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/project/jersey-media-jaxb)
+        * jersey-core-server (org.glassfish.jersey.core:jersey-server:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/jersey-server)
+        * jersey-core-common (org.glassfish.jersey.core:jersey-common:2.29 - https://projects.eclipse.org/projects/ee4j.jersey/jersey-common)
+        * Hibernate Validator Engine (org.hibernate:hibernate-validator:5.4.2.Final - http://hibernate.org/validator/hibernate-validator)
+        * Hibernate Validator Engine (org.hibernate.validator:hibernate-validator:6.0.17.Final - http://hibernate.org/validator/hibernate-validator)
 
     BSD 3-Clause License
 
         * ASM Commons (asm:asm-commons:3.1 - http://asm.objectweb.org/asm-commons/)
         * ASM Core (asm:asm:3.1 - http://asm.objectweb.org/asm/)
         * ASM Tree (asm:asm-tree:3.1 - http://asm.objectweb.org/asm-tree/)
+        * ASM Core (org.ow2.asm:asm:5.0.4 - http://asm.objectweb.org/asm/)
         * leveldbjni-all (org.fusesource.leveldbjni:leveldbjni-all:1.8 - http://leveldbjni.fusesource.org/leveldbjni-all)
         * ANTLR 3 Runtime (org.antlr:antlr-runtime:3.5.2 - http://www.antlr.org)
         * ANTLR ST4 4.0.4 (org.antlr:ST4:4.0.4 - http://www.stringtemplate.org)
-        * ASM Core (asm:asm:3.3.1 - http://asm.objectweb.org/asm/)
         * ASM Core (org.ow2.asm:asm:5.0.3 - http://asm.objectweb.org/asm/)
         * ParaNamer Core (com.thoughtworks.paranamer:paranamer:2.3 - http://paranamer.codehaus.org/paranamer)
         * xmlenc Library (xmlenc:xmlenc:0.52 - http://xmlenc.sourceforge.net)
@@ -997,100 +965,81 @@
     BSD 2-Clause License
     
         * JLine (jline:jline:0.9.94 - http://jline.sourceforge.net)
-        * JLine (jline:jline:2.12 - http://nexus.sonatype.org/oss-repository-hosting.html/jline)    
         * Javolution (javolution:javolution:5.5.1 - http://javolution.org)
 
     Common Development and Distribution License
 
-        * aopalliance version 1.0 repackaged as a module (org.glassfish.hk2.external:aopalliance-repackaged:2.5.0-b32 - https://hk2.java.net/external/aopalliance-repackaged)
-        * aopalliance version 1.0 repackaged as a module (org.glassfish.hk2.external:aopalliance-repackaged:2.5.0-b42 - https://github.com/hk2-project/hk2/external/aopalliance-repackaged)
         * Expression Language 3.0 (org.glassfish:javax.el:3.0.0 - http://el-spec.java.net)
         * Expression Language 3.0 (org.glassfish:javax.el:3.0.1-b11 - http://uel.java.net)
-        * grizzly-framework (org.glassfish.grizzly:grizzly-framework:2.4.3 - http://grizzly.java.net/grizzly-framework)
-        * grizzly-http (org.glassfish.grizzly:grizzly-http:2.4.3 - http://grizzly.java.net/grizzly-http)
-        * grizzly-http-server (org.glassfish.grizzly:grizzly-http-server:2.4.3 - http://grizzly.java.net/grizzly-http-server)
-        * grizzly-http-servlet (org.glassfish.grizzly:grizzly-http-servlet:2.4.0 - http://grizzly.java.net/grizzly-http-servlet)
-        * HK2 API module (org.glassfish.hk2:hk2-api:2.5.0-b32 - https://hk2.java.net/hk2-api)
-        * HK2 API module (org.glassfish.hk2:hk2-api:2.5.0-b42 - https://github.com/hk2-project/hk2/hk2-api)
-        * HK2 Implementation Utilities (org.glassfish.hk2:hk2-utils:2.5.0-b32 - https://hk2.java.net/hk2-utils)
-        * HK2 Implementation Utilities (org.glassfish.hk2:hk2-utils:2.5.0-b42 - https://github.com/hk2-project/hk2/hk2-utils)
         * JavaServer Pages(TM) API (javax.servlet.jsp:javax.servlet.jsp-api:2.3.1 - http://jsp.java.net)
         * Java Servlet API (javax.servlet:javax.servlet-api:3.1.0 - http://servlet-spec.java.net)
-        * Java Servlet API (javax.servlet:javax.servlet-api:4.0.0 - https://javaee.github.io/servlet-spec/)
-        * javax.annotation API (javax.annotation:javax.annotation-api:1.2 - http://jcp.org/en/jsr/detail?id=250)
-        * javax.annotation API (javax.annotation:javax.annotation-api:1.3.2 - http://jcp.org/en/jsr/detail?id=250)
-        * javax.inject:1 as OSGi bundle (org.glassfish.hk2.external:javax.inject:2.5.0-b32 - https://hk2.java.net/external/javax.inject)
-        * javax.inject:1 as OSGi bundle (org.glassfish.hk2.external:javax.inject:2.5.0-b42 - https://github.com/hk2-project/hk2/external/javax.inject)
-        * jersey-container-servlet-core (org.glassfish.jersey.containers:jersey-container-servlet-core:2.25.1 - https://jersey.java.net/project/jersey-container-servlet-core/)
-        * jersey-core-client (org.glassfish.jersey.core:jersey-client:2.25.1 - https://jersey.java.net/jersey-client/)
-        * jersey-core-common (org.glassfish.jersey.core:jersey-common:2.25.1 - https://jersey.java.net/jersey-common/)
-        * jersey-core-server (org.glassfish.jersey.core:jersey-server:2.25.1 - https://jersey.java.net/jersey-server/)
-        * jersey-media-jaxb (org.glassfish.jersey.media:jersey-media-jaxb:2.25.1 - https://jersey.java.net/project/jersey-media-jaxb/)
-        * jersey-repackaged-guava (org.glassfish.jersey.bundles.repackaged:jersey-guava:2.25.1 - https://jersey.java.net/project/project/jersey-guava/)
         * JSP implementation (org.glassfish.web:javax.servlet.jsp:2.3.2 - http://jsp.java.net)
-        * OSGi resource locator bundle - used by various API providers that rely on META-INF/services mechanism to locate providers. (org.glassfish.hk2:osgi-resource-locator:1.0.1 - http://glassfish.org/osgi-resource-locator/)
-        * ServiceLocator Default Implementation (org.glassfish.hk2:hk2-locator:2.5.0-b32 - https://hk2.java.net/hk2-locator)
-        * ServiceLocator Default Implementation (org.glassfish.hk2:hk2-locator:2.5.0-b42 - https://github.com/hk2-project/hk2/hk2-locator)
+        * javax.annotation API (javax.annotation:javax.annotation-api:1.3.2 - http://jcp.org/en/jsr/detail?id=250)
 
     Common Development and Distribution License (CDDL) v1.0
 
         * JavaBeans(TM) Activation Framework (javax.activation:activation:1.1.1 - http://java.sun.com/javase/technologies/desktop/javabeans/jaf/index.jsp)
         * JavaMail API (javax.mail:mail:1.4.1 - https://glassfish.dev.java.net/javaee5/mail/)
-        * JSR-250 Common Annotations for the JavaTM Platform (javax.annotation:jsr250-api:1.0 - http://jcp.org/aboutJava/communityprocess/final/jsr250/index.html)
 
     Common Development and Distribution License (CDDL) v1.1
 
         * Java Message Service (javax.jms:jms:1.1 - http://java.sun.com/products/jms)
         * Java Transaction API (javax.transaction:jta:1.1 - http://java.sun.com/products/jta)
-        * jersey-bom (org.glassfish.jersey:jersey-bom:2.27 - http://java.net/jersey-bom/)
         * jsp-api (javax.servlet:jsp-api:2.0 - no url defined)
         * jsp-api (javax.servlet.jsp:jsp-api:2.1 - no url defined)
         * servlet-api (javax.servlet:servlet-api:2.5 - no url defined)
         * transaction-api (javax.transaction:transaction-api:1.1 - no url defined)
         * javax.ws.rs-api (javax.ws.rs:javax.ws.rs-api:2.0.1 - http://jax-rs-spec.java.net)
-        * javax.ws.rs-api (javax.ws.rs:javax.ws.rs-api:2.1 - http://jax-rs-spec.java.net)
         * jaxb-api (javax.xml.bind:jaxb-api:2.3.0 - https://github.com/javaee/jaxb-spec/jaxb-api)
         * JAXB RI (com.sun.xml.bind:jaxb-impl:2.2.3-1 - http://jaxb.java.net/)
         * jersey-client (com.sun.jersey:jersey-client:1.9 - https://jersey.java.net/jersey-client/)
         * jersey-core (com.sun.jersey:jersey-core:1.9 - https://jersey.java.net/jersey-core/)
-        * jersey-grizzly2 (com.sun.jersey:jersey-grizzly2:1.19.4 - https://jersey.java.net/jersey-grizzly2/)
         * jersey-guice (com.sun.jersey.contribs:jersey-guice:1.9 - https://jersey.java.net/jersey-contribs/jersey-guice/)
         * jersey-json (com.sun.jersey:jersey-json:1.9 - https://jersey.java.net/jersey-json/)
         * jersey-server (com.sun.jersey:jersey-server:1.9 - https://jersey.java.net/jersey-server/)
 
     Eclipse Public License, Version 1.0
-
-        * Aether API (org.eclipse.aether:aether-api:1.1.0 - http://www.eclipse.org/aether/aether-api/)
-        * Aether Connector Basic (org.eclipse.aether:aether-connector-basic:1.1.0 - http://www.eclipse.org/aether/aether-connector-basic/)
-        * Aether Implementation (org.eclipse.aether:aether-impl:1.1.0 - http://www.eclipse.org/aether/aether-impl/)
-        * Aether SPI (org.eclipse.aether:aether-spi:1.1.0 - http://www.eclipse.org/aether/aether-spi/)
-        * Aether Transport File (org.eclipse.aether:aether-transport-file:1.1.0 - http://www.eclipse.org/aether/aether-transport-file/)
-        * Aether Transport HTTP (org.eclipse.aether:aether-transport-http:1.1.0 - http://www.eclipse.org/aether/aether-transport-http/)
-        * Aether Utilities (org.eclipse.aether:aether-util:1.1.0 - http://www.eclipse.org/aether/aether-util/)
-        * clojure (org.clojure:clojure:1.3.0-beta1 - http://clojure.org/)
-        * JUnit (junit:junit:4.12 - http://junit.org)
-        * org.eclipse.sisu.inject (org.eclipse.sisu:org.eclipse.sisu.inject:0.0.0.M2a - http://www.eclipse.org/sisu/org.eclipse.sisu.inject/)
-        * org.eclipse.sisu.plexus (org.eclipse.sisu:org.eclipse.sisu.plexus:0.0.0.M2a - http://www.eclipse.org/sisu/org.eclipse.sisu.plexus/)
+
         * tools.logging (org.clojure:tools.logging:0.2.3 - http://nexus.sonatype.org/oss-repository-hosting.html/pom.contrib/tools.logging)
+        * clojure (org.clojure:clojure:1.10.0 - http://clojure.org/)
+        * core.specs.alpha (org.clojure:core.specs.alpha:0.2.44 - https://github.com/clojure/build.poms/core.specs.alpha)
+        * spec.alpha (org.clojure:spec.alpha:0.2.176 - https://github.com/clojure/build.poms/spec.alpha)
+        * JGraphT (org.jgrapht:jgrapht-core:0.9.0 - https://jgrapht.org/)
         
-        * JGraphT (org.jgrapht:jgrapht-core:jar:0.9.0 - https://jgrapht.org/)
+    Eclipse Public License, Version 2.0
+
+        * jakarta.annotation API (jakarta.annotation:jakarta.annotation-api:1.3.4 - https://projects.eclipse.org/projects/ee4j.ca)
+        * Expression Language 3.0 API (jakarta.el:jakarta.el-api:3.0.2 - https://projects.eclipse.org/projects/ee4j.el)
+        * Java Servlet API (jakarta.servlet:jakarta.servlet-api:4.0.2 - https://projects.eclipse.org/projects/ee4j.servlet)
+        * javax.ws.rs-api (jakarta.ws.rs:jakarta.ws.rs-api:2.1.5 - https://github.com/eclipse-ee4j/jaxrs-api)
+        * grizzly-framework (org.glassfish.grizzly:grizzly-framework:2.4.4 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-framework)
+        * grizzly-http (org.glassfish.grizzly:grizzly-http:2.4.4 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-http)
+        * grizzly-http-server (org.glassfish.grizzly:grizzly-http-server:2.4.4 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-http-server)
+        * grizzly-http-servlet (org.glassfish.grizzly:grizzly-http-servlet:2.4.4 - https://projects.eclipse.org/projects/ee4j.grizzly/grizzly-http-servlet)
+        * aopalliance version 1.0 repackaged as a module (org.glassfish.hk2.external:aopalliance-repackaged:2.5.0 - https://github.com/eclipse-ee4j/glassfish-hk2/external/aopalliance-repackaged)
+        * HK2 API module (org.glassfish.hk2:hk2-api:2.5.0 - https://github.com/eclipse-ee4j/glassfish-hk2/hk2-api)
+        * HK2 Implementation Utilities (org.glassfish.hk2:hk2-utils:2.5.0 - https://github.com/eclipse-ee4j/glassfish-hk2/hk2-utils)
+        * javax.inject:1 as OSGi bundle (org.glassfish.hk2.external:jakarta.inject:2.5.0 - https://github.com/eclipse-ee4j/glassfish-hk2/external/jakarta.inject)
+        * OSGi resource locator (org.glassfish.hk2:osgi-resource-locator:1.0.3 - https://projects.eclipse.org/projects/ee4j/osgi-resource-locator)
+        * ServiceLocator Default Implementation (org.glassfish.hk2:hk2-locator:2.5.0 - https://github.com/eclipse-ee4j/glassfish-hk2/hk2-locator)
+        * Expression Language 3.0 (org.glassfish:jakarta.el:3.0.2 - https://projects.eclipse.org/projects/ee4j.el)
+
+    Eclipse Distribution License, Version 1.0
+
+        * JavaBeans Activation Framework (com.sun.activation:jakarta.activation:1.2.1 - https://github.com/eclipse-ee4j/jaf/jakarta.activation)
+        * JavaBeans Activation Framework API jar (jakarta.activation:jakarta.activation-api:1.2.1 - https://github.com/eclipse-ee4j/jaf/jakarta.activation-api)
+        * jakarta.xml.bind-api (jakarta.xml.bind:jakarta.xml.bind-api:2.3.2 - https://github.com/eclipse-ee4j/jaxb-api/jakarta.xml.bind-api)
 
     MIT License
 
         * Animal Sniffer Annotations (org.codehaus.mojo:animal-sniffer-annotations:1.17 - http://www.mojohaus.org/animal-sniffer/animal-sniffer-annotations)
         * argparse4j (net.sourceforge.argparse4j:argparse4j:0.8.1 - http://argparse4j.github.io)
         * Checker Qual (org.checkerframework:checker-qual:2.5.2 - https://checkerframework.org)
-        * JCL 1.1.1 implemented over SLF4J (org.slf4j:jcl-over-slf4j:1.6.2 - http://www.slf4j.org)
-        * JCL 1.2 implemented over SLF4J (org.slf4j:jcl-over-slf4j:1.7.25 - http://www.slf4j.org)
         * JCodings (org.jruby.jcodings:jcodings:1.0.18 - http://nexus.sonatype.org/oss-repository-hosting.html/jcodings)
         * Joni (org.jruby.joni:joni:2.1.11 - http://nexus.sonatype.org/oss-repository-hosting.html/joni)
+        * JCL 1.2 implemented over SLF4J (org.slf4j:jcl-over-slf4j:1.7.25 - http://www.slf4j.org)
         * JUL to SLF4J bridge (org.slf4j:jul-to-slf4j:1.7.25 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.6.2 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.7.13 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.7.21 - http://www.slf4j.org)
-        * SLF4J API Module (org.slf4j:slf4j-api:1.7.25 - http://www.slf4j.org)
-        * SLF4J LOG4J-12 Binding (org.slf4j:slf4j-log4j12:1.6.6 - http://www.slf4j.org)
-        
+        * SLF4J API Module (org.slf4j:slf4j-api:1.7.26 - http://www.slf4j.org)
+        * SLF4J LOG4J-12 Binding (org.slf4j:slf4j-log4j12:1.7.26 - http://www.slf4j.org)
         * Sysout over SLF4J (uk.org.lidalia:sysout-over-slf4j:1.0.2 - http://projects.lidalia.org.uk/sysout-over-slf4j/)
 
     Mozilla Public License Version 2.0
@@ -1100,4 +1049,3 @@
     Public Domain
 
         * AOP alliance (aopalliance:aopalliance:1.0 - http://aopalliance.sourceforge.net)
-        * XZ for Java (org.tukaani:xz:1.0 - http://tukaani.org/xz/java.html)
diff --git a/bin/flight.bash b/bin/flight.bash
index da15419..ecd76ab 100755
--- a/bin/flight.bash
+++ b/bin/flight.bash
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -164,5 +164,3 @@
 else
     usage_and_quit
 fi
-
-
diff --git a/bin/storm b/bin/storm
index fa4c40d..f30f05f 100755
--- a/bin/storm
+++ b/bin/storm
@@ -34,7 +34,9 @@
 done
 
 # check for version
-PYTHON="/usr/bin/env python"
+if [ -z "$PYTHON" ]; then
+  PYTHON="/usr/bin/env python"
+fi
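+# PYTHON can now be overridden from the caller's environment, e.g.:
+#   PYTHON=/usr/bin/python3 bin/storm version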
 majversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f1`
 minversion=`$PYTHON -V 2>&1 | awk '{print $2}' | cut -d'.' -f2`
 numversion=$(( 10 * $majversion + $minversion))
diff --git a/conf/defaults.yaml b/conf/defaults.yaml
index 8434b1d..e555ac8 100644
--- a/conf/defaults.yaml
+++ b/conf/defaults.yaml
@@ -144,7 +144,7 @@
 nimbus.blobstore.expiration.secs: 600
 
 storm.blobstore.inputstream.buffer.size.bytes: 65536
-storm.blobstore.dependency.jar.upload.chuck.size.bytes: 1048576
+storm.blobstore.dependency.jar.upload.chunk.size.bytes: 1048576
 client.blobstore.class: "org.apache.storm.blobstore.NimbusBlobStore"
 storm.blobstore.replication.factor: 3
 # For secure mode we would want to change this config to true
@@ -336,12 +336,14 @@
 resource.aware.scheduler.priority.strategy: "org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy"
 topology.ras.constraint.max.state.search: 10_000     # The maximum number of states that will be searched looking for a solution in the constraint solver strategy
 resource.aware.scheduler.constraint.max.state.search: 100_000 # Daemon limit on maximum number of states that will be searched looking for a solution in the constraint solver strategy
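+# Assumed semantics (from the option name): when true, the resource aware
+# scheduler places at most one executor in each worker.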
+topology.ras.one.executor.per.worker: false
 
 blacklist.scheduler.tolerance.time.secs: 300
 blacklist.scheduler.tolerance.count: 3
 blacklist.scheduler.resume.time.secs: 1800
 blacklist.scheduler.reporter: "org.apache.storm.scheduler.blacklist.reporters.LogReporter"
 blacklist.scheduler.strategy: "org.apache.storm.scheduler.blacklist.strategies.DefaultBlacklistStrategy"
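+# Assumed semantics (from the option name): when true, a single bad slot is
+# enough for the blacklist scheduler to treat the whole supervisor as bad.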
+blacklist.scheduler.assume.supervisor.bad.based.on.bad.slot: true
 
 dev.zookeeper.path: "/tmp/dev-storm-zookeeper"
 
diff --git a/conf/storm-env.sh b/conf/storm-env.sh
index d66aea0..5a9eb06 100644
--- a/conf/storm-env.sh
+++ b/conf/storm-env.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 #
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
diff --git a/dev-tools/list_jars.sh b/dev-tools/list_jars.sh
deleted file mode 100644
index fd36ef9..0000000
--- a/dev-tools/list_jars.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-################################################################################
-#  Licensed to the Apache Software Foundation (ASF) under one
-#  or more contributor license agreements.  See the NOTICE file
-#  distributed with this work for additional information
-#  regarding copyright ownership.  The ASF licenses this file
-#  to you under the Apache License, Version 2.0 (the
-#  "License"); you may not use this file except in compliance
-#  with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-# limitations under the License.
-################################################################################
-
-# This script print the names of jars in the target directories and subdirectories.
-#
-# Listing the jars in the binary distribution helps validate that the LICENSE-binary file is complete.
-
-set -Eeuo pipefail
-
-SRC=${1:-.}
-
-USAGE="list_jars <SOURCE_DIRECTORY:-.>"
-
-if [ "${SRC}" = "-h" ]; then
-	echo "${USAGE}"
-	exit 0
-fi
-
-JARS=""
-for dir in $@
-do
-  JARS+=`find -L "${SRC}" -name "*.jar" -printf "%f\n"`
-  JARS+=$'\n'
-done
-echo "$JARS" | sort | uniq
diff --git a/dev-tools/travis/travis-script.sh b/dev-tools/travis/travis-script.sh
index f7d582c..b743688 100755
--- a/dev-tools/travis/travis-script.sh
+++ b/dev-tools/travis/travis-script.sh
@@ -27,6 +27,9 @@
 if [ "$2" == "Integration-Test" ]
   then
   exec ./integration-test/run-it.sh
+elif [ "$2" == "Check-Updated-License-Files" ]
+  then
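+  # validate-license-files.py needs Python 3.6+ (it uses f-strings), hence
+  # the explicitly versioned interpreter.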
+  exec python3.6 dev-tools/validate-license-files.py --skip-build-storm
 elif [ "$2" == "Client" ]
 then
   TEST_MODULES=storm-client
diff --git a/dev-tools/validate-license-files.py b/dev-tools/validate-license-files.py
new file mode 100755
index 0000000..12c6e21
--- /dev/null
+++ b/dev-tools/validate-license-files.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python3
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from contextlib import contextmanager
+from pathlib import Path
+import os
+import subprocess
+import shlex
+import filecmp
+import re
+import itertools
+import argparse
+
+project_root = Path(__file__).resolve().parent.parent
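+# Maven's 'goal@execution-id' syntax: this runs the license-maven-plugin's
+# aggregate-add-third-party goal with the 'generate-and-check-licenses'
+# execution configured in the pom; -B is Maven's non-interactive batch mode.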
+update_dependency_licenses_cmd = ('mvn license:aggregate-add-third-party@generate-and-check-licenses' +
+                                  ' -Dlicense.skipAggregateAddThirdParty=false -B')
+
+
+@contextmanager
+def cd(newdir):
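+    """Temporarily change the working directory for the body of a 'with'
+    block, restoring the previous directory afterwards (even on error)."""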
+    prevdir = Path.cwd()
+    os.chdir(newdir.expanduser())
+    try:
+        yield
+    finally:
+        os.chdir(prevdir)
+
+
+def generate_dependency_licenses():
+    """Generates DEPENDENCY-LICENSES in target. The committed DEPENDENCY-LICENSES is not modified."""
+    print('Generating DEPENDENCY-LICENSES')
+    update_dependency_licenses_output_to_target_cmd = (update_dependency_licenses_cmd +
+                                                       ' -Dlicense.thirdPartyFilename=DEPENDENCY-LICENSES' +
+                                                       ' -Dlicense.outputDirectory=target')
+    subprocess.check_call(shlex.split(
+        update_dependency_licenses_output_to_target_cmd))
+    print('Done generating DEPENDENCY-LICENSES')
+
+
+def check_dependency_licenses():
+    """Compares the regenerated DEPENDENCY-LICENSES in target with the DEPENDENCY-LICENSES in the root, and verifies that they are identical"""
+    print('Checking DEPENDENCY-LICENSES')
+    if not filecmp.cmp(Path('DEPENDENCY-LICENSES'), Path('target') / 'DEPENDENCY-LICENSES', shallow=False):
+        print(
+            f"DEPENDENCY-LICENSES and target/DEPENDENCY-LICENSES are different. Please update DEPENDENCY-LICENSES by running '{update_dependency_licenses_cmd}' in the project root")
+        return False
+    return True
+
+
+def build_storm():
+    print("Building Storm")
+    subprocess.check_call(shlex.split(
+        'mvn clean install -B -DskipTests -Dcheckstyle.skip -Dpmd.skip'
+    ))
+    print("Done building Storm")
+
+
+def extract_license_report_maven_coordinates(lines):
+    # Lines like " * Checker Qual (org.checkerframework:checker-qual:2.5.2 - https://checkerframework.org)"
+    matches = map(lambda line: re.match(
+        r'\s+\*.*\((?P<gav>.*) \- .*\).*', line), lines)
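+    # For the sample line above, the 'gav' group captures
+    # 'org.checkerframework:checker-qual:2.5.2'; the URL after ' - ' is ignored.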
+    return set(map(lambda match: match.group('gav'), filter(lambda match: match is not None, matches)))
+
+
+def parse_license_binary_dependencies_coordinate_set():
+    """Gets the dependencies listed in LICENSE-binary"""
+    license_binary_begin_binary_section = '----------------------------END OF SOURCE NOTICES -------------------------------------------'
+    license_binary_lines = read_lines(project_root / 'LICENSE-binary')
+    return extract_license_report_maven_coordinates(
+        itertools.dropwhile(lambda line: license_binary_begin_binary_section not in line, license_binary_lines))
+
+
+def extract_dependency_list_maven_coordinates(lines):
+    # Lines like "   com.google.code.findbugs:jsr305:jar:3.0.2 -- module jsr305 (auto)"
+    matches = map(lambda line: re.match(
+        r'\s+(?P<group>\S*)\:(?P<artifact>\S*)\:(?P<type>\S*)\:(?P<version>\S*)', line), lines)
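+    # e.g. 'com.google.code.findbugs:jsr305:jar:3.0.2' is reduced to
+    # 'com.google.code.findbugs:jsr305:3.0.2' (the packaging type is dropped).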
+    return set(map(lambda match: match.group('group') + ':' + match.group('artifact') + ':' + match.group('version'), filter(lambda match: match is not None, matches)))
+
+
+def read_lines(path):
+    with open(path) as f:
+        return f.readlines()
+
+
+def generate_storm_dist_dependencies_coordinate_set():
+    """Gets the dependencies for storm-dist/binary, plus the dependencies of storm-shaded-deps"""
+    generated_coordinate_set = extract_license_report_maven_coordinates(read_lines(
+        project_root / 'storm-dist' / 'binary' / 'target' / 'generated-sources' / 'license' / 'THIRD-PARTY.txt'))
+
+    # Add dependencies from storm-shaded-deps
+    with cd(project_root / 'storm-shaded-deps'):
+        print("Generating dependency list for storm-shaded-deps")
+        subprocess.check_call(shlex.split(
+            'mvn dependency:list -DoutputFile=target/deps-list -Dmdep.outputScope=false -DincludeScope=compile -B'))
+        print("Done generating dependency list for storm-shaded-deps")
+    shaded_dep_coordinates = extract_dependency_list_maven_coordinates(
+        read_lines(project_root / 'storm-shaded-deps' / 'target' / 'deps-list'))
+    shaded_dep_coordinates = set(filter(lambda coordinate: 'org.apache.storm:' not in coordinate, shaded_dep_coordinates))
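+    # Storm's own org.apache.storm artifacts are filtered out: they fall under
+    # the project license rather than the third-party list.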
+    print('The storm-shaded-deps dependencies that are included when distributing storm-dist/binary are ' + str(shaded_dep_coordinates))
+    print('')
+    generated_coordinate_set.update(shaded_dep_coordinates)
+
+    return generated_coordinate_set
+
+def generate_storm_dist_license_report():
+    with cd(project_root / 'storm-dist' / 'binary'):
+        print('')
+        print('Generating storm-dist license report')
+        subprocess.check_call(shlex.split(update_dependency_licenses_cmd))
+        print('Done generating storm-dist license report')
+
+def make_license_binary_checker():
+    """Checks that the dependencies in the storm-dist/binary license report are mentioned in LICENSE-binary, and vice versa."""
+    print('Checking LICENSE-binary')
+
+    license_binary_coordinate_set = parse_license_binary_dependencies_coordinate_set()
+    generated_coordinate_set = generate_storm_dist_dependencies_coordinate_set()
+    superfluous_coordinates_in_license = license_binary_coordinate_set.difference(
+        generated_coordinate_set)
+    coordinates_missing_in_license = generated_coordinate_set.difference(
+        license_binary_coordinate_set)
+    print('Done checking LICENSE-binary')
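+    # The verdict is returned as a closure: the expensive Maven runs and set
+    # comparisons happen above, while the error report is printed later so it
+    # appears together with the DEPENDENCY-LICENSES check output.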
+    def check_for_errors():
+        if superfluous_coordinates_in_license:
+            print('Dependencies in LICENSE-binary that appear unused: ')
+            for coord in sorted(superfluous_coordinates_in_license):
+                print(coord)
+        print('')
+        if coordinates_missing_in_license:
+            print('Dependencies missing from LICENSE-binary: ')
+            for coord in sorted(coordinates_missing_in_license):
+                print(coord)
+        any_wrong_coordinates = coordinates_missing_in_license or superfluous_coordinates_in_license
+        if any_wrong_coordinates:
+            print('LICENSE-binary needs to be updated. Please remove any unnecessary dependencies from LICENSE-binary, and add any that are missing. You can copy any missing dependencies from DEPENDENCY-LICENSES')
+        return not any_wrong_coordinates
+    return check_for_errors
+
+
+with cd(project_root):
+    parser = argparse.ArgumentParser(description='Validate that the Storm license files are up to date (excluding NOTICE-binary and the licenses/ directory)')
+    parser.add_argument('--skip-build-storm', action='store_true', help='set to skip building Storm')
+    args = parser.parse_args()
+    success = True
+
+    if not args.skip_build_storm:
+        build_storm()
+    generate_dependency_licenses()
+    generate_storm_dist_license_report()
+    license_binary_checker = make_license_binary_checker()
+    success = check_dependency_licenses() and success
+    success = license_binary_checker() and success
+    if not success:
+        print('Some license files are not up to date; see above for the relevant error messages')
+        exit(1)
+    print('License files are up to date')
+    exit(0)
diff --git a/examples/storm-elasticsearch-examples/pom.xml b/examples/storm-elasticsearch-examples/pom.xml
index d25a747..da7b629 100644
--- a/examples/storm-elasticsearch-examples/pom.xml
+++ b/examples/storm-elasticsearch-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -94,9 +94,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-hbase-examples/pom.xml b/examples/storm-hbase-examples/pom.xml
index 65b71fe..9b178a6 100644
--- a/examples/storm-hbase-examples/pom.xml
+++ b/examples/storm-hbase-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -86,9 +86,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-hdfs-examples/pom.xml b/examples/storm-hdfs-examples/pom.xml
index 697a22c..153bee9 100644
--- a/examples/storm-hdfs-examples/pom.xml
+++ b/examples/storm-hdfs-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -96,9 +96,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-hive-examples/pom.xml b/examples/storm-hive-examples/pom.xml
index 10e2112..045ca68 100644
--- a/examples/storm-hive-examples/pom.xml
+++ b/examples/storm-hive-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -96,9 +96,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-jdbc-examples/pom.xml b/examples/storm-jdbc-examples/pom.xml
index c334379..09cce61 100644
--- a/examples/storm-jdbc-examples/pom.xml
+++ b/examples/storm-jdbc-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -86,9 +86,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-jms-examples/pom.xml b/examples/storm-jms-examples/pom.xml
index 56596f7..2bad95c 100644
--- a/examples/storm-jms-examples/pom.xml
+++ b/examples/storm-jms-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -104,9 +104,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-kafka-client-examples/pom.xml b/examples/storm-kafka-client-examples/pom.xml
index 531a082..558c11d 100644
--- a/examples/storm-kafka-client-examples/pom.xml
+++ b/examples/storm-kafka-client-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -102,9 +102,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-loadgen/pom.xml b/examples/storm-loadgen/pom.xml
index e634acc..6cdc6c3 100644
--- a/examples/storm-loadgen/pom.xml
+++ b/examples/storm-loadgen/pom.xml
@@ -20,7 +20,7 @@
   <parent>
     <artifactId>storm</artifactId>
     <groupId>org.apache.storm</groupId>
-    <version>2.0.1-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
   <artifactId>storm-loadgen</artifactId>
@@ -123,9 +123,6 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-checkstyle-plugin</artifactId>
         <!--Note - the version would be inherited-->
-        <configuration>
-          <maxAllowedViolations>0</maxAllowedViolations>
-        </configuration>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadMetricsServer.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadMetricsServer.java
index 90e8f05..c02de27 100644
--- a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadMetricsServer.java
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadMetricsServer.java
@@ -313,7 +313,7 @@
     }
 
     private static class NoCloseOutputStream extends FilterOutputStream {
-        public NoCloseOutputStream(OutputStream out) {
+        NoCloseOutputStream(OutputStream out) {
             super(out);
         }
 
@@ -328,11 +328,11 @@
         protected final Map<String, MetricExtractor> allExtractors;
         public final boolean includesSysOutOrError;
 
-        public FileReporter(Map<String, MetricExtractor> allExtractors) throws FileNotFoundException {
+        FileReporter(Map<String, MetricExtractor> allExtractors) throws FileNotFoundException {
             this(null, Collections.emptyMap(), allExtractors);
         }
 
-        public FileReporter(String path, Map<String, String> query,  Map<String, MetricExtractor> allExtractors)
+        FileReporter(String path, Map<String, String> query,  Map<String, MetricExtractor> allExtractors)
             throws FileNotFoundException {
             boolean append = Boolean.parseBoolean(query.getOrDefault("append", "false"));
             boolean tee = Boolean.parseBoolean(query.getOrDefault("tee", "false"));
@@ -467,12 +467,12 @@
         private final String unit;
         private final BiFunction<Measurements, TimeUnit, Object> func;
 
-        public MetricExtractor(BiFunction<Measurements, TimeUnit, Object> func) {
+        MetricExtractor(BiFunction<Measurements, TimeUnit, Object> func) {
             this.func = func;
             this.unit = null;
         }
 
-        public MetricExtractor(BiFunction<Measurements, TimeUnit, Object> func, String unit) {
+        MetricExtractor(BiFunction<Measurements, TimeUnit, Object> func, String unit) {
             this.func = func;
             this.unit = unit;
         }
@@ -504,12 +504,12 @@
         protected final int precision;
         protected String doubleFormat;
 
-        public ColumnsFileReporter(String path, Map<String, String> query, Map<String, MetricExtractor> extractorsMap)
+        ColumnsFileReporter(String path, Map<String, String> query, Map<String, MetricExtractor> extractorsMap)
             throws FileNotFoundException {
             this(path, query, extractorsMap, null);
         }
 
-        public ColumnsFileReporter(String path, Map<String, String> query, Map<String, MetricExtractor> extractorsMap,
+        ColumnsFileReporter(String path, Map<String, String> query, Map<String, MetricExtractor> extractorsMap,
                                    String defaultPreceision) throws FileNotFoundException {
             super(path, query, extractorsMap);
             targetUnit = UNIT_MAP.get(query.getOrDefault("time", "MILLISECONDS").toUpperCase());
@@ -590,7 +590,7 @@
         public final String longFormat;
         public final String stringFormat;
 
-        public FixedWidthReporter(String path, Map<String, String> query, Map<String, MetricExtractor> extractorsMap)
+        FixedWidthReporter(String path, Map<String, String> query, Map<String, MetricExtractor> extractorsMap)
             throws FileNotFoundException {
             super(path, query, extractorsMap, "3");
             int columnWidth = Integer.parseInt(query.getOrDefault("columnWidth", "15")) - 1;//Always have a space in between
@@ -599,7 +599,7 @@
             stringFormat = "%" + columnWidth + "s";
         }
 
-        public FixedWidthReporter(Map<String, MetricExtractor> allExtractors) throws FileNotFoundException {
+        FixedWidthReporter(Map<String, MetricExtractor> allExtractors) throws FileNotFoundException {
             this(null, Collections.emptyMap(), allExtractors);
         }
 
@@ -652,7 +652,7 @@
     static class SepValReporter extends ColumnsFileReporter {
         private final String separator;
 
-        public SepValReporter(String separator, String path, Map<String, String> query, Map<String, MetricExtractor> extractorsMap)
+        SepValReporter(String separator, String path, Map<String, String> query, Map<String, MetricExtractor> extractorsMap)
             throws FileNotFoundException {
             super(path, query, extractorsMap);
             this.separator = separator;
@@ -702,12 +702,12 @@
     static class LegacyReporter extends FileReporter {
         private final TimeUnit targetUnitOverride;
 
-        public LegacyReporter(Map<String, MetricExtractor> allExtractors) throws FileNotFoundException {
+        LegacyReporter(Map<String, MetricExtractor> allExtractors) throws FileNotFoundException {
             super(allExtractors);
             targetUnitOverride = null;
         }
 
-        public LegacyReporter(String path, Map<String, String> query, Map<String, MetricExtractor> allExtractors)
+        LegacyReporter(String path, Map<String, String> query, Map<String, MetricExtractor> allExtractors)
             throws FileNotFoundException {
             super(path, query, allExtractors);
             if (query.containsKey("time")) {
diff --git a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadSpout.java b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadSpout.java
index ef454c5..611dc63 100644
--- a/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadSpout.java
+++ b/examples/storm-loadgen/src/main/java/org/apache/storm/loadgen/LoadSpout.java
@@ -41,7 +41,7 @@
     private static class OutputStreamEngineWithHisto extends OutputStreamEngine {
         public final HistogramMetric histogram;
 
-        public OutputStreamEngineWithHisto(OutputStream stats, TopologyContext context) {
+        OutputStreamEngineWithHisto(OutputStream stats, TopologyContext context) {
             super(stats);
             histogram = new HistogramMetric(3600000000000L, 3);
             //TODO perhaps we can adjust the frequency later...
diff --git a/examples/storm-mongodb-examples/pom.xml b/examples/storm-mongodb-examples/pom.xml
index 3766f7e..12fa0f9 100644
--- a/examples/storm-mongodb-examples/pom.xml
+++ b/examples/storm-mongodb-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -86,9 +86,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-mqtt-examples/pom.xml b/examples/storm-mqtt-examples/pom.xml
index 3de7620..a67e059 100644
--- a/examples/storm-mqtt-examples/pom.xml
+++ b/examples/storm-mqtt-examples/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <artifactId>storm</artifactId>
     <groupId>org.apache.storm</groupId>
-    <version>2.0.1-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
   
@@ -122,9 +122,6 @@
             <groupId>org.apache.maven.plugins</groupId>
             <artifactId>maven-checkstyle-plugin</artifactId>
             <!--Note - the version would be inherited-->
-            <configuration>
-                <maxAllowedViolations>0</maxAllowedViolations>
-            </configuration>
         </plugin>
         <plugin>
             <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-opentsdb-examples/pom.xml b/examples/storm-opentsdb-examples/pom.xml
index 90e9d76..df11587 100644
--- a/examples/storm-opentsdb-examples/pom.xml
+++ b/examples/storm-opentsdb-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -86,9 +86,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-perf/pom.xml b/examples/storm-perf/pom.xml
index 07c4b17..84158cf 100644
--- a/examples/storm-perf/pom.xml
+++ b/examples/storm-perf/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -91,9 +91,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/BackPressureTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/BackPressureTopo.java
index 0443fd4..5fb7022 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/BackPressureTopo.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/BackPressureTopo.java
@@ -88,7 +88,7 @@
         private OutputCollector collector;
         private long sleepMs;
 
-        public ThrottledBolt(Long sleepMs) {
+        ThrottledBolt(Long sleepMs) {
             this.sleepMs = sleepMs;
         }
 
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/LowThroughputTopo.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/LowThroughputTopo.java
index 38e35bc..71ffd7c 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/LowThroughputTopo.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/LowThroughputTopo.java
@@ -93,7 +93,7 @@
         private SpoutOutputCollector collector = null;
         private long sleepTimeMs;
 
-        public ThrottledSpout(long sleepMs) {
+        ThrottledSpout(long sleepMs) {
             this.sleepTimeMs = sleepMs;
         }
 
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Acker.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Acker.java
index e936329..adcc3a3 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Acker.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Acker.java
@@ -27,7 +27,7 @@
     private final JCQueue ackerInQ;
     private final JCQueue spoutInQ;
 
-    public Acker(JCQueue ackerInQ, JCQueue spoutInQ) {
+    Acker(JCQueue ackerInQ, JCQueue spoutInQ) {
         super("Acker");
         this.ackerInQ = ackerInQ;
         this.spoutInQ = spoutInQ;
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/AckingProducer.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/AckingProducer.java
index 132d793..0214515 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/AckingProducer.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/AckingProducer.java
@@ -27,7 +27,7 @@
     private final JCQueue ackerInQ;
     private final JCQueue spoutInQ;
 
-    public AckingProducer(JCQueue ackerInQ, JCQueue spoutInQ) {
+    AckingProducer(JCQueue ackerInQ, JCQueue spoutInQ) {
         super("AckingProducer");
         this.ackerInQ = ackerInQ;
         this.spoutInQ = spoutInQ;
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Consumer.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Consumer.java
index 52ff210..eadb51d 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Consumer.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Consumer.java
@@ -26,7 +26,7 @@
     public final MutableLong counter = new MutableLong(0);
     private final JCQueue queue;
 
-    public Consumer(JCQueue queue) {
+    Consumer(JCQueue queue) {
         super("Consumer");
         this.queue = queue;
     }
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Forwarder.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Forwarder.java
index 2f951dd..a5781d0 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Forwarder.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Forwarder.java
@@ -27,7 +27,7 @@
     private final JCQueue inq;
     private final JCQueue outq;
 
-    public Forwarder(JCQueue inq, JCQueue outq) {
+    Forwarder(JCQueue inq, JCQueue outq) {
         super("Forwarder");
         this.inq = inq;
         this.outq = outq;
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/MyThread.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/MyThread.java
index 4c84316..5a5f6fc 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/MyThread.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/MyThread.java
@@ -22,7 +22,7 @@
     public long count = 0;
     public long runTime = 0;
 
-    public MyThread(String thdName) {
+    MyThread(String thdName) {
         super(thdName);
     }
 
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer.java
index eb02d5e..2801819 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer.java
@@ -23,7 +23,7 @@
 class Producer extends MyThread {
     private final JCQueue queue;
 
-    public Producer(JCQueue queue) {
+    Producer(JCQueue queue) {
         super("Producer");
         this.queue = queue;
     }
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer2.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer2.java
index 8df519b..c36d88d 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer2.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/queuetest/Producer2.java
@@ -27,7 +27,7 @@
     private final JCQueue q1;
     private final JCQueue q2;
 
-    public Producer2(JCQueue q1, JCQueue q2) {
+    Producer2(JCQueue q1, JCQueue q2) {
         super("Producer2");
         this.q1 = q1;
         this.q2 = q2;
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Cons.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Cons.java
index 3783310..257e9e7 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Cons.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Cons.java
@@ -26,7 +26,7 @@
     public final MutableLong counter = new MutableLong(0);
     private final MpscArrayQueue<Object> queue;
 
-    public Cons(MpscArrayQueue<Object> queue) {
+    Cons(MpscArrayQueue<Object> queue) {
         super("Consumer");
         this.queue = queue;
     }
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/MyThd.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/MyThd.java
index fbd7aab..0922c23 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/MyThd.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/MyThd.java
@@ -23,7 +23,7 @@
     public long runTime = 0;
     public boolean halt = false;
 
-    public MyThd(String thdName) {
+    MyThd(String thdName) {
         super(thdName);
     }
 
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod.java
index 3553552..cfb1bf2 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod.java
@@ -23,7 +23,7 @@
 class Prod extends MyThd {
     private final MpscArrayQueue<Object> queue;
 
-    public Prod(MpscArrayQueue<Object> queue) {
+    Prod(MpscArrayQueue<Object> queue) {
         super("Producer");
         this.queue = queue;
     }
diff --git a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod2.java b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod2.java
index 470522a..e0eea19 100644
--- a/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod2.java
+++ b/examples/storm-perf/src/main/java/org/apache/storm/perf/toolstest/Prod2.java
@@ -27,7 +27,7 @@
     private final MpscArrayQueue<Object> q1;
     private final MpscArrayQueue<Object> q2;
 
-    public Prod2(MpscArrayQueue<Object> q1, MpscArrayQueue<Object> q2) {
+    Prod2(MpscArrayQueue<Object> q1, MpscArrayQueue<Object> q2) {
         super("Producer2");
         this.q1 = q1;
         this.q2 = q2;
diff --git a/examples/storm-pmml-examples/pom.xml b/examples/storm-pmml-examples/pom.xml
index b687ac1..20d7284 100644
--- a/examples/storm-pmml-examples/pom.xml
+++ b/examples/storm-pmml-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
@@ -86,9 +86,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-redis-examples/pom.xml b/examples/storm-redis-examples/pom.xml
index 4ec2347..44d863d 100644
--- a/examples/storm-redis-examples/pom.xml
+++ b/examples/storm-redis-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -90,9 +90,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/LookupWordCount.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/LookupWordCount.java
index 54456ec..243f1bf 100644
--- a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/LookupWordCount.java
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/LookupWordCount.java
@@ -126,7 +126,7 @@
         private RedisDataTypeDescription description;
         private final String hashKey = "wordCount";
 
-        public WordCountRedisLookupMapper() {
+        WordCountRedisLookupMapper() {
             description = new RedisDataTypeDescription(
                     RedisDataTypeDescription.RedisDataType.HASH, hashKey);
         }
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/PersistentWordCount.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/PersistentWordCount.java
index eca75ea..93f790b 100644
--- a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/PersistentWordCount.java
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/PersistentWordCount.java
@@ -79,7 +79,7 @@
         private RedisDataTypeDescription description;
         private final String hashKey = "wordCount";
 
-        public WordCountStoreMapper() {
+        WordCountStoreMapper() {
             description = new RedisDataTypeDescription(
                     RedisDataTypeDescription.RedisDataType.HASH, hashKey);
         }
diff --git a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WhitelistWordCount.java b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WhitelistWordCount.java
index 8640335..5b47f0c 100644
--- a/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WhitelistWordCount.java
+++ b/examples/storm-redis-examples/src/main/java/org/apache/storm/redis/topology/WhitelistWordCount.java
@@ -122,7 +122,7 @@
         private RedisDataTypeDescription description;
         private final String setKey = "whitelist";
 
-        public WhitelistWordFilterMapper() {
+        WhitelistWordFilterMapper() {
             description = new RedisDataTypeDescription(
                     RedisDataTypeDescription.RedisDataType.SET, setKey);
         }
diff --git a/examples/storm-rocketmq-examples/pom.xml b/examples/storm-rocketmq-examples/pom.xml
index 31774a4..176f93a 100644
--- a/examples/storm-rocketmq-examples/pom.xml
+++ b/examples/storm-rocketmq-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -86,9 +86,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-solr-examples/pom.xml b/examples/storm-solr-examples/pom.xml
index 81570d1..5ccefcd 100644
--- a/examples/storm-solr-examples/pom.xml
+++ b/examples/storm-solr-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -117,9 +117,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-starter/pom.xml b/examples/storm-starter/pom.xml
index 3835bbc..5636146 100644
--- a/examples/storm-starter/pom.xml
+++ b/examples/storm-starter/pom.xml
@@ -20,7 +20,7 @@
   <parent>
       <artifactId>storm</artifactId>
       <groupId>org.apache.storm</groupId>
-      <version>2.0.1-SNAPSHOT</version>
+      <version>2.2.0-SNAPSHOT</version>
       <relativePath>../../pom.xml</relativePath>
   </parent>
 
@@ -228,9 +228,6 @@
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-checkstyle-plugin</artifactId>
           <!--Note - the version would be inherited-->
-          <configuration>
-              <maxAllowedViolations>0</maxAllowedViolations>
-          </configuration>
       </plugin>
         <plugin>
             <groupId>org.apache.maven.plugins</groupId>
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/Prefix.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/Prefix.java
index 94df975..31f200a 100644
--- a/examples/storm-starter/src/jvm/org/apache/storm/starter/Prefix.java
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/Prefix.java
@@ -17,7 +17,7 @@
 class Prefix implements Serializable {
     private String str;
 
-    public Prefix(String str) {
+    Prefix(String str) {
         this.str = str;
     }
 
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfDevicesTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfDevicesTopology.java
index 176326e..5944408 100644
--- a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfDevicesTopology.java
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfDevicesTopology.java
@@ -156,7 +156,7 @@
         final int maxSpeed;
         final double efficiency;
 
-        public Vehicle(String name, int maxSpeed, double efficiency) {
+        Vehicle(String name, int maxSpeed, double efficiency) {
             this.name = name;
             this.maxSpeed = maxSpeed;
             this.efficiency = efficiency;
diff --git a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfVehiclesTopology.java b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfVehiclesTopology.java
index 83e9d62..3c0ff31 100644
--- a/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfVehiclesTopology.java
+++ b/examples/storm-starter/src/jvm/org/apache/storm/starter/trident/TridentMinMaxOfVehiclesTopology.java
@@ -134,7 +134,7 @@
         final int maxSpeed;
         final double efficiency;
 
-        public Vehicle(String name, int maxSpeed, double efficiency) {
+        Vehicle(String name, int maxSpeed, double efficiency) {
             this.name = name;
             this.maxSpeed = maxSpeed;
             this.efficiency = efficiency;
diff --git a/external/storm-autocreds/pom.xml b/external/storm-autocreds/pom.xml
index 87b2a2f..3397352 100644
--- a/external/storm-autocreds/pom.xml
+++ b/external/storm-autocreds/pom.xml
@@ -19,7 +19,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
@@ -206,9 +206,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-autocreds/src/main/java/org/apache/storm/common/AbstractHadoopNimbusPluginAutoCreds.java b/external/storm-autocreds/src/main/java/org/apache/storm/common/AbstractHadoopNimbusPluginAutoCreds.java
index 807d2ad..8cea761 100644
--- a/external/storm-autocreds/src/main/java/org/apache/storm/common/AbstractHadoopNimbusPluginAutoCreds.java
+++ b/external/storm-autocreds/src/main/java/org/apache/storm/common/AbstractHadoopNimbusPluginAutoCreds.java
@@ -119,11 +119,11 @@
      */
     protected abstract String getConfigKeyString();
 
-    protected abstract byte[] getHadoopCredentials(Map<String, Object> topologyConf, String configKey, final String topologyOwnerPrincipal);
+    protected abstract byte[] getHadoopCredentials(Map<String, Object> topologyConf, String configKey, String topologyOwnerPrincipal);
 
-    protected abstract byte[] getHadoopCredentials(Map<String, Object> topologyConf, final String topologyOwnerPrincipal);
+    protected abstract byte[] getHadoopCredentials(Map<String, Object> topologyConf, String topologyOwnerPrincipal);
 
-    protected abstract void doRenew(Map<String, String> credentials, Map<String, Object> topologyConf, final String topologyOwnerPrincipal);
+    protected abstract void doRenew(Map<String, String> credentials, Map<String, Object> topologyConf, String topologyOwnerPrincipal);
 
     protected List<String> getConfigKeys(Map<String, Object> conf) {
         String configKeyString = getConfigKeyString();
diff --git a/external/storm-blobstore-migration/pom.xml b/external/storm-blobstore-migration/pom.xml
index d7c2eb2..a7d6a68 100644
--- a/external/storm-blobstore-migration/pom.xml
+++ b/external/storm-blobstore-migration/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -128,9 +128,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-cassandra/pom.xml b/external/storm-cassandra/pom.xml
index 495f9d5..101e6db 100644
--- a/external/storm-cassandra/pom.xml
+++ b/external/storm-cassandra/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -131,9 +131,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/context/BaseBeanFactory.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/context/BaseBeanFactory.java
index de6afaf..37b638a 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/context/BaseBeanFactory.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/context/BaseBeanFactory.java
@@ -45,7 +45,7 @@
     /**
      * Return a new instance of T.
      */
-    protected abstract T make(final Map<String, Object> topoConf);
+    protected abstract T make(Map<String, Object> topoConf);
 
     /**
      * {@inheritDoc}
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/context/BeanFactory.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/context/BeanFactory.java
index 8dbbbae..78414f8 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/context/BeanFactory.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/context/BeanFactory.java
@@ -23,7 +23,7 @@
     /**
      * Sets the storm context.
      */
-    public void setStormContext(WorkerCtx context);
+    void setStormContext(WorkerCtx context);
 
     /**
      * Return an instance, which may be shared or independent, of the specified type.
@@ -36,5 +36,5 @@
      * Returns a new copy of this factory.
      * @return a new {@link BeanFactory} instance.
      */
-    public BeanFactory<T> newInstance();
+    BeanFactory<T> newInstance();
 }
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncExecutor.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncExecutor.java
index eec9026..0b8f66c 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncExecutor.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncExecutor.java
@@ -222,7 +222,7 @@
         private final List<Throwable> exceptions;
         private final Semaphore throttle;
 
-        public AsyncContext(List<T> inputs, Semaphore throttle, SettableFuture<List<T>> settableFuture) {
+        AsyncContext(List<T> inputs, Semaphore throttle, SettableFuture<List<T>> settableFuture) {
             this.inputs = inputs;
             this.latch = new AtomicInteger(inputs.size());
             this.throttle = throttle;
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultHandler.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultHandler.java
index 2f8766e..8241347 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultHandler.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultHandler.java
@@ -20,7 +20,7 @@
  */
 public interface AsyncResultHandler<T> extends Serializable {
 
-    public static final AsyncResultHandler NO_OP_HANDLER = new AsyncResultHandler() {
+    AsyncResultHandler NO_OP_HANDLER = new AsyncResultHandler() {
         @Override
         public void failure(Throwable t, Object inputs) {
             /** no-operation **/
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultSetHandler.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultSetHandler.java
index 7a2159d..23f755a 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultSetHandler.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/AsyncResultSetHandler.java
@@ -20,7 +20,7 @@
  */
 public interface AsyncResultSetHandler<T> extends Serializable {
 
-    public static final AsyncResultSetHandler NO_OP_HANDLER = new AsyncResultSetHandler() {
+    AsyncResultSetHandler NO_OP_HANDLER = new AsyncResultSetHandler() {
         @Override
         public void failure(Throwable t, Object inputs) {
             /** no-operation **/
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/ExecutionResultCollector.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/ExecutionResultCollector.java
index 8c8057d..258a927 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/ExecutionResultCollector.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/executor/ExecutionResultCollector.java
@@ -25,7 +25,7 @@
 
     void handle(OutputCollector collector, ExecutionResultHandler handler);
 
-    public static final class SucceedCollector implements ExecutionResultCollector {
+    final class SucceedCollector implements ExecutionResultCollector {
 
         private final List<Tuple> inputs;
 
@@ -60,7 +60,7 @@
         }
     }
 
-    public static final class FailedCollector implements ExecutionResultCollector {
+    final class FailedCollector implements ExecutionResultCollector {
 
         private final Throwable cause;
         private final List<Tuple> inputs;
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLStatementTupleMapper.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLStatementTupleMapper.java
index 29290a2..1b0ab79 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLStatementTupleMapper.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CQLStatementTupleMapper.java
@@ -37,7 +37,7 @@
     List<Statement> map(Map<String, Object> conf, Session session, ITuple tuple);
 
     @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
-    public static class DynamicCQLStatementTupleMapper implements CQLStatementTupleMapper {
+    class DynamicCQLStatementTupleMapper implements CQLStatementTupleMapper {
         private List<CQLStatementBuilder> builders;
 
         public DynamicCQLStatementTupleMapper(List<CQLStatementBuilder> builders) {
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/ContextQuery.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/ContextQuery.java
index f308540..15594ac 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/ContextQuery.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/ContextQuery.java
@@ -30,12 +30,12 @@
      *
      * @return a string bound query.
      */
-    public String resolves(Map<String, Object> config, ITuple tuple);
+    String resolves(Map<String, Object> config, ITuple tuple);
 
     /**
      * Static implementation of {@link ContextQuery} interface.
      */
-    public static final class StaticContextQuery implements ContextQuery {
+    final class StaticContextQuery implements ContextQuery {
         private final String value;
 
         /**
@@ -55,7 +55,7 @@
      * Default {@link BoundQueryContext} implementation to retrieve a bound query
      * identified by the provided key.
      */
-    public static final class BoundQueryContext implements ContextQuery {
+    final class BoundQueryContext implements ContextQuery {
         private String key;
 
         public BoundQueryContext(String key) {
@@ -76,7 +76,7 @@
      * Default {@link BoundQueryNamedByFieldContext} implementation to retrieve a bound query named by
      * the value of a specified tuple field.
      */
-    public static final class BoundQueryNamedByFieldContext implements ContextQuery {
+    final class BoundQueryNamedByFieldContext implements ContextQuery {
 
         private String fieldName;
 
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CqlMapper.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CqlMapper.java
index 9a1892b..51368fd 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CqlMapper.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/CqlMapper.java
@@ -31,7 +31,7 @@
     List<Column> map(ITuple tuple);
 
 
-    public static final class SelectableCqlMapper implements CqlMapper {
+    final class SelectableCqlMapper implements CqlMapper {
 
         private final List<FieldSelector> selectors;
 
@@ -59,7 +59,7 @@
     /**
      * Default {@link CqlMapper} to map all tuple values to columns.
      */
-    public static final class DefaultCqlMapper implements CqlMapper {
+    final class DefaultCqlMapper implements CqlMapper {
 
         /**
          * Creates a new {@link org.apache.storm.cassandra.query.CqlMapper.DefaultCqlMapper} instance.
diff --git a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/PreparedStatementBinder.java b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/PreparedStatementBinder.java
index 1c82ee6..0019ab0 100644
--- a/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/PreparedStatementBinder.java
+++ b/external/storm-cassandra/src/main/java/org/apache/storm/cassandra/query/impl/PreparedStatementBinder.java
@@ -21,9 +21,9 @@
 
 public interface PreparedStatementBinder extends Serializable {
 
-    public BoundStatement apply(PreparedStatement statement, List<Column> columns);
+    BoundStatement apply(PreparedStatement statement, List<Column> columns);
 
-    public static final class DefaultBinder implements PreparedStatementBinder {
+    final class DefaultBinder implements PreparedStatementBinder {
 
         /**
          * {@inheritDoc}
@@ -36,7 +36,7 @@
     }
 
     @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
-    public static final class CQL3NamedSettersBinder implements PreparedStatementBinder {
+    final class CQL3NamedSettersBinder implements PreparedStatementBinder {
 
         /**
          * {@inheritDoc}
diff --git a/external/storm-elasticsearch/pom.xml b/external/storm-elasticsearch/pom.xml
index 9785772..6c85f53 100644
--- a/external/storm-elasticsearch/pom.xml
+++ b/external/storm-elasticsearch/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -147,9 +147,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsState.java b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsState.java
index 86863d7..3a8e202 100644
--- a/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsState.java
+++ b/external/storm-elasticsearch/src/main/java/org/apache/storm/elasticsearch/trident/EsState.java
@@ -58,7 +58,7 @@
      * @param esConfig Elasticsearch configuration containing node addresses and cluster name {@link EsConfig}
      * @param tupleMapper Tuple to ES document mapper {@link EsTupleMapper}
      */
-    public EsState(EsConfig esConfig, EsTupleMapper tupleMapper) {
+    EsState(EsConfig esConfig, EsTupleMapper tupleMapper) {
         this.esConfig = esConfig;
         this.objectMapper = new ObjectMapper();
         this.tupleMapper = tupleMapper;
diff --git a/external/storm-eventhubs/pom.xml b/external/storm-eventhubs/pom.xml
index 8dec463..95a21e8 100755
--- a/external/storm-eventhubs/pom.xml
+++ b/external/storm-eventhubs/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     
@@ -52,9 +52,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/IEventDataFormat.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/IEventDataFormat.java
index 08f8e63..fb03a7c 100644
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/IEventDataFormat.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/bolt/IEventDataFormat.java
@@ -25,5 +25,5 @@
  * Serialize a tuple to a byte array to be sent to EventHubs.
  */
 public interface IEventDataFormat extends Serializable {
-    public byte[] serialize(Tuple tuple);
+    byte[] serialize(Tuple tuple);
 }
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/IStateStore.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/IStateStore.java
index 311ceb7..a2c4d4f 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/IStateStore.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/spout/IStateStore.java
@@ -22,11 +22,11 @@
 
 public interface IStateStore extends Serializable {
 
-    public void open();
+    void open();
 
-    public void close();
+    void close();
 
-    public void saveData(String path, String data);
+    void saveData(String path, String data);
 
-    public String readData(String path);
+    String readData(String path);
 }
diff --git a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/ITridentPartitionManager.java b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/ITridentPartitionManager.java
index e1abdba..aa1e567 100755
--- a/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/ITridentPartitionManager.java
+++ b/external/storm-eventhubs/src/main/java/org/apache/storm/eventhubs/trident/ITridentPartitionManager.java
@@ -33,5 +33,5 @@
      * @param count  max number of messages in this batch
      * @return the list of EventData, or an empty list if receiving failed
      */
-    public List<EventDataWrap> receiveBatch(String offset, int count);
+    List<EventDataWrap> receiveBatch(String offset, int count);
 }
diff --git a/external/storm-hbase/pom.xml b/external/storm-hbase/pom.xml
index 6620f6a..9fe0518 100644
--- a/external/storm-hbase/pom.xml
+++ b/external/storm-hbase/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -116,9 +116,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseValueMapper.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseValueMapper.java
index d26b125..a234de7 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseValueMapper.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/bolt/mapper/HBaseValueMapper.java
@@ -27,7 +27,7 @@
      * @param result HBase lookup result instance
      * @return list of values that should be emitted by the lookup bolt
      */
-    public List<Values> toValues(ITuple input, Result result) throws Exception;
+    List<Values> toValues(ITuple input, Result result) throws Exception;
 
     /**
      * Declares the output fields for the lookup bolt.
diff --git a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/TridentHBaseMapMapper.java b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/TridentHBaseMapMapper.java
index a9ea429..fde9dea 100644
--- a/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/TridentHBaseMapMapper.java
+++ b/external/storm-hbase/src/main/java/org/apache/storm/hbase/trident/mapper/TridentHBaseMapMapper.java
@@ -25,10 +25,10 @@
     /**
      * Given a tuple's grouped key list, return the HBase rowkey.
      */
-    public byte[] rowKey(List<Object> keys);
+    byte[] rowKey(List<Object> keys);
 
     /**
      * Given a tuple's grouped key list, return the HBase qualifier.
      */
-    public String qualifier(List<Object> keys);
+    String qualifier(List<Object> keys);
 }
diff --git a/external/storm-hdfs-blobstore/pom.xml b/external/storm-hdfs-blobstore/pom.xml
index ccc69a6..c035785 100644
--- a/external/storm-hdfs-blobstore/pom.xml
+++ b/external/storm-hdfs-blobstore/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -259,9 +259,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-hdfs/pom.xml b/external/storm-hdfs/pom.xml
index a19d821..6528917 100644
--- a/external/storm-hdfs/pom.xml
+++ b/external/storm-hdfs/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -274,9 +274,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AbstractHdfsBolt.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AbstractHdfsBolt.java
index a145274..ee82066 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AbstractHdfsBolt.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/AbstractHdfsBolt.java
@@ -311,7 +311,7 @@
         final long maxWriters;
         final OutputCollector collector;
 
-        public WritersMap(long maxWriters, OutputCollector collector) {
+        WritersMap(long maxWriters, OutputCollector collector) {
             super((int) maxWriters, 0.75f, true);
             this.maxWriters = maxWriters;
             this.collector = collector;
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileSizeRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileSizeRotationPolicy.java
index 24a46dd..eca10d6 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileSizeRotationPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/FileSizeRotationPolicy.java
@@ -61,7 +61,7 @@
         return new FileSizeRotationPolicy(this.maxBytes);
     }
 
-    public static enum Units {
+    public enum Units {
 
         KB((long) Math.pow(2, 10)),
         MB((long) Math.pow(2, 20)),
@@ -70,7 +70,7 @@
 
         private long byteCount;
 
-        private Units(long byteCount) {
+        Units(long byteCount) {
             this.byteCount = byteCount;
         }
 
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/TimedRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/TimedRotationPolicy.java
index e600831..c2f7e16 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/TimedRotationPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/bolt/rotation/TimedRotationPolicy.java
@@ -61,7 +61,7 @@
         return this.interval;
     }
 
-    public static enum TimeUnit {
+    public enum TimeUnit {
 
         SECONDS((long) 1000),
         MINUTES((long) 1000 * 60),
@@ -70,7 +70,7 @@
 
         private long milliSeconds;
 
-        private TimeUnit(long milliSeconds) {
+        TimeUnit(long milliSeconds) {
             this.milliSeconds = milliSeconds;
         }
 
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/Partitioner.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/Partitioner.java
index 1488655..92f6747 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/Partitioner.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/common/Partitioner.java
@@ -25,5 +25,5 @@
      *
      * @param tuple The tuple for which the relative path is being calculated.
      */
-    public String getPartitionPath(final Tuple tuple);
+    String getPartitionPath(Tuple tuple);
 }
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/AbstractFileReader.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/AbstractFileReader.java
index 6d5537b..614d240 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/AbstractFileReader.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/AbstractFileReader.java
@@ -20,7 +20,7 @@
 
     private final Path file;
 
-    public AbstractFileReader(FileSystem fs, Path file) {
+    AbstractFileReader(FileSystem fs, Path file) {
         if (fs == null) {
             throw new IllegalArgumentException("filesystem arg cannot be null for reader");
         }
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/HdfsSpout.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/HdfsSpout.java
index a7ce729..446fb85 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/HdfsSpout.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/spout/HdfsSpout.java
@@ -793,7 +793,7 @@
         public String fullPath;
         public FileOffset offset;
 
-        public MessageId(long msgNumber, Path fullPath, FileOffset offset) {
+        MessageId(long msgNumber, Path fullPath, FileOffset offset) {
             this.msgNumber = msgNumber;
             this.fullPath = fullPath.toString();
             this.offset = offset;
@@ -821,13 +821,13 @@
         public final Path oldFile;
         public final Path newFile;
 
-        public RenameException(Path oldFile, Path newFile) {
+        RenameException(Path oldFile, Path newFile) {
             super("Rename of " + oldFile + " to " + newFile + " failed");
             this.oldFile = oldFile;
             this.newFile = newFile;
         }
 
-        public RenameException(Path oldFile, Path newFile, IOException cause) {
+        RenameException(Path oldFile, Path newFile, IOException cause) {
             super("Rename of " + oldFile + " to " + newFile + " failed", cause);
             this.oldFile = oldFile;
             this.newFile = newFile;
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileSizeRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileSizeRotationPolicy.java
index 18790d8..f9631b7 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileSizeRotationPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/FileSizeRotationPolicy.java
@@ -65,7 +65,7 @@
         return maxBytes;
     }
 
-    public static enum Units {
+    public enum Units {
 
         KB((long) Math.pow(2, 10)),
         MB((long) Math.pow(2, 20)),
@@ -74,7 +74,7 @@
 
         private long byteCount;
 
-        private Units(long byteCount) {
+        Units(long byteCount) {
             this.byteCount = byteCount;
         }
 
diff --git a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/TimedRotationPolicy.java b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/TimedRotationPolicy.java
index 4539464..5f4475c 100644
--- a/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/TimedRotationPolicy.java
+++ b/external/storm-hdfs/src/main/java/org/apache/storm/hdfs/trident/rotation/TimedRotationPolicy.java
@@ -77,7 +77,7 @@
         rotationTimer.scheduleAtFixedRate(task, interval, interval);
     }
 
-    public static enum TimeUnit {
+    public enum TimeUnit {
 
         SECONDS((long) 1000),
         MINUTES((long) 1000 * 60),
@@ -86,7 +86,7 @@
 
         private long milliSeconds;
 
-        private TimeUnit(long milliSeconds) {
+        TimeUnit(long milliSeconds) {
             this.milliSeconds = milliSeconds;
         }
 
diff --git a/external/storm-hive/pom.xml b/external/storm-hive/pom.xml
index 4ca6f0b..777eb4a 100644
--- a/external/storm-hive/pom.xml
+++ b/external/storm-hive/pom.xml
@@ -21,7 +21,7 @@
   <parent>
     <artifactId>storm</artifactId>
     <groupId>org.apache.storm</groupId>
-    <version>2.0.1-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <relativePath>../../pom.xml</relativePath>
   </parent>
 
@@ -245,9 +245,6 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-checkstyle-plugin</artifactId>
         <!--Note - the version would be inherited-->
-        <configuration>
-          <maxAllowedViolations>0</maxAllowedViolations>
-        </configuration>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-jdbc/pom.xml b/external/storm-jdbc/pom.xml
index 2b31123..e90a454 100644
--- a/external/storm-jdbc/pom.xml
+++ b/external/storm-jdbc/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -89,9 +89,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcLookupMapper.java b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcLookupMapper.java
index 32e98e2..2736814 100644
--- a/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcLookupMapper.java
+++ b/external/storm-jdbc/src/main/java/org/apache/storm/jdbc/mapper/JdbcLookupMapper.java
@@ -27,7 +27,7 @@
      * @param columns list of columns that represents a row
      * @return a List of storm values that can be emitted. Each item in the list is emitted as an output tuple.
      */
-    public List<Values> toTuple(ITuple input, List<Column> columns);
+    List<Values> toTuple(ITuple input, List<Column> columns);
 
     /**
      * Declare the fields that this code will output.
diff --git a/external/storm-jms/pom.xml b/external/storm-jms/pom.xml
index ceb4efb..7ee187f 100644
--- a/external/storm-jms/pom.xml
+++ b/external/storm-jms/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -80,9 +80,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-jms/src/main/java/org/apache/storm/jms/JmsMessageProducer.java b/external/storm-jms/src/main/java/org/apache/storm/jms/JmsMessageProducer.java
index 1dcd608..e9010ab 100644
--- a/external/storm-jms/src/main/java/org/apache/storm/jms/JmsMessageProducer.java
+++ b/external/storm-jms/src/main/java/org/apache/storm/jms/JmsMessageProducer.java
@@ -29,5 +29,5 @@
      * Translate a <code>org.apache.storm.tuple.Tuple</code> object
      * to a <code>javax.jms.Message</code> object.
      */
-    public Message toMessage(Session session, ITuple input) throws JMSException;
+    Message toMessage(Session session, ITuple input) throws JMSException;
 }
diff --git a/external/storm-jms/src/main/java/org/apache/storm/jms/JmsProvider.java b/external/storm-jms/src/main/java/org/apache/storm/jms/JmsProvider.java
index 324ff9d..be81fad 100644
--- a/external/storm-jms/src/main/java/org/apache/storm/jms/JmsProvider.java
+++ b/external/storm-jms/src/main/java/org/apache/storm/jms/JmsProvider.java
@@ -28,11 +28,11 @@
      *
      * @return the connection factory
      */
-    public ConnectionFactory connectionFactory() throws Exception;
+    ConnectionFactory connectionFactory() throws Exception;
 
     /**
      * Provides the <code>Destination</code> (topic or queue) from which the
      * <code>JmsSpout</code> will receive messages.
      */
-    public Destination destination() throws Exception;
+    Destination destination() throws Exception;
 }
diff --git a/external/storm-jms/src/main/java/org/apache/storm/jms/trident/TridentJmsSpout.java b/external/storm-jms/src/main/java/org/apache/storm/jms/trident/TridentJmsSpout.java
index bf310f4..cd0bf54 100644
--- a/external/storm-jms/src/main/java/org/apache/storm/jms/trident/TridentJmsSpout.java
+++ b/external/storm-jms/src/main/java/org/apache/storm/jms/trident/TridentJmsSpout.java
@@ -78,7 +78,7 @@
      * @return A friendly string describing the acknowledge mode
      * @throws IllegalArgumentException if the mode is not recognized
      */
-    private static final String toDeliveryModeString(int acknowledgeMode) {
+    private static String toDeliveryModeString(int acknowledgeMode) {
         switch (acknowledgeMode) {
             case Session.AUTO_ACKNOWLEDGE:
                 return "AUTO_ACKNOWLEDGE";
@@ -188,7 +188,7 @@
         private final Logger log = LoggerFactory.getLogger(JmsEmitter.class);
         private long lastRotate;
 
-        public JmsEmitter(String name, JmsProvider jmsProvider, JmsTupleProducer tupleProducer, int jmsAcknowledgeMode,
+        JmsEmitter(String name, JmsProvider jmsProvider, JmsTupleProducer tupleProducer, int jmsAcknowledgeMode,
                           Map<String, Object> conf) {
             if (jmsProvider == null) {
                 throw new IllegalStateException("JMS provider has not been set.");
@@ -359,7 +359,7 @@
 
         private final Logger log = LoggerFactory.getLogger(JmsBatchCoordinator.class);
 
-        public JmsBatchCoordinator(String name) {
+        JmsBatchCoordinator(String name) {
             this.name = name;
             log.info("Created batch coordinator for " + name);
         }
diff --git a/external/storm-kafka-client/pom.xml b/external/storm-kafka-client/pom.xml
index f5d024d..6f86c36 100644
--- a/external/storm-kafka-client/pom.xml
+++ b/external/storm-kafka-client/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -175,9 +175,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/KafkaSpout.java b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/KafkaSpout.java
index 27a531c..391aecc 100644
--- a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/KafkaSpout.java
+++ b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/KafkaSpout.java
@@ -728,7 +728,7 @@
         //The subset of earliest retriable offsets that are on pollable partitions
         private final Map<TopicPartition, Long> pollableEarliestRetriableOffsets;
 
-        public PollablePartitionsInfo(Set<TopicPartition> pollablePartitions, Map<TopicPartition, Long> earliestRetriableOffsets) {
+        PollablePartitionsInfo(Set<TopicPartition> pollablePartitions, Map<TopicPartition, Long> earliestRetriableOffsets) {
             this.pollablePartitions = pollablePartitions;
             this.pollableEarliestRetriableOffsets = earliestRetriableOffsets.entrySet().stream()
                 .filter(entry -> pollablePartitions.contains(entry.getKey()))
diff --git a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutRetryExponentialBackoff.java b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutRetryExponentialBackoff.java
index f0db397..0b99a40 100644
--- a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutRetryExponentialBackoff.java
+++ b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/KafkaSpoutRetryExponentialBackoff.java
@@ -73,7 +73,7 @@
         private final KafkaSpoutMessageId msgId;
         private long nextRetryTimeNanos;
 
-        public RetrySchedule(KafkaSpoutMessageId msgId, long nextRetryTimeNanos) {
+        RetrySchedule(KafkaSpoutMessageId msgId, long nextRetryTimeNanos) {
             this.msgId = msgId;
             this.nextRetryTimeNanos = nextRetryTimeNanos;
             LOG.debug("Created {}", this);
diff --git a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/RecordTranslator.java b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/RecordTranslator.java
index 12263bd..e8d938c 100644
--- a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/RecordTranslator.java
+++ b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/RecordTranslator.java
@@ -29,7 +29,7 @@
  * Translate a {@link org.apache.kafka.clients.consumer.ConsumerRecord} to a tuple.
  */
 public interface RecordTranslator<K, V> extends Serializable, Func<ConsumerRecord<K, V>, List<Object>> {
-    public static final List<String> DEFAULT_STREAM = Collections.singletonList("default");
+    List<String> DEFAULT_STREAM = Collections.singletonList("default");
     
     /**
      * Translate the ConsumerRecord into a list of objects that can be emitted.
diff --git a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/internal/ConsumerFactory.java b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/internal/ConsumerFactory.java
index 469a5f6..e7f9288 100644
--- a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/internal/ConsumerFactory.java
+++ b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/internal/ConsumerFactory.java
@@ -24,5 +24,5 @@
  * This is here to enable testing.
  */
 public interface ConsumerFactory<K, V> extends Serializable {
-    public Consumer<K,V> createConsumer(Map<String, Object> consumerProps);
+    Consumer<K,V> createConsumer(Map<String, Object> consumerProps);
 }
diff --git a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/subscription/ManualPartitioner.java b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/subscription/ManualPartitioner.java
index 9db0613..b6f3060 100644
--- a/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/subscription/ManualPartitioner.java
+++ b/external/storm-kafka-client/src/main/java/org/apache/storm/kafka/spout/subscription/ManualPartitioner.java
@@ -40,5 +40,5 @@
      * @param context the context of the topology
      * @return the subset of the partitions that this spout task should handle.
      */
-    public Set<TopicPartition> getPartitionsForThisTask(List<TopicPartition> allPartitionsSorted, TopologyContext context);
+    Set<TopicPartition> getPartitionsForThisTask(List<TopicPartition> allPartitionsSorted, TopologyContext context);
 }
diff --git a/external/storm-kafka-migration/pom.xml b/external/storm-kafka-migration/pom.xml
index 71b7e0b..ca0891d 100644
--- a/external/storm-kafka-migration/pom.xml
+++ b/external/storm-kafka-migration/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -81,9 +81,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-kafka-migration/src/main/java/org/apache/storm/kafka/migration/KafkaTridentSpoutMigration.java b/external/storm-kafka-migration/src/main/java/org/apache/storm/kafka/migration/KafkaTridentSpoutMigration.java
index cfec761..25a16dc 100644
--- a/external/storm-kafka-migration/src/main/java/org/apache/storm/kafka/migration/KafkaTridentSpoutMigration.java
+++ b/external/storm-kafka-migration/src/main/java/org/apache/storm/kafka/migration/KafkaTridentSpoutMigration.java
@@ -59,7 +59,7 @@
         private final long firstOffset;
         private final long lastOffset;
 
-        public PartitionMetadata(long firstOffset, long lastOffset) {
+        PartitionMetadata(long firstOffset, long lastOffset) {
             this.firstOffset = firstOffset;
             this.lastOffset = lastOffset;
         }
diff --git a/external/storm-kafka-monitor/pom.xml b/external/storm-kafka-monitor/pom.xml
index cd18c76..264318f 100644
--- a/external/storm-kafka-monitor/pom.xml
+++ b/external/storm-kafka-monitor/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
@@ -74,9 +74,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-kinesis/pom.xml b/external/storm-kinesis/pom.xml
index 39c89f1..80a4fd5 100644
--- a/external/storm-kinesis/pom.xml
+++ b/external/storm-kinesis/pom.xml
@@ -17,7 +17,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
@@ -61,9 +61,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-metrics/pom.xml b/external/storm-metrics/pom.xml
index 5cb3ec3..dcdb4e7 100644
--- a/external/storm-metrics/pom.xml
+++ b/external/storm-metrics/pom.xml
@@ -20,7 +20,7 @@
   <parent>
       <artifactId>storm</artifactId>
       <groupId>org.apache.storm</groupId>
-      <version>2.0.1-SNAPSHOT</version>
+      <version>2.2.0-SNAPSHOT</version>
       <relativePath>../../pom.xml</relativePath>
   </parent>
 
@@ -101,9 +101,6 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-checkstyle-plugin</artifactId>
         <!--Note - the version would be inherited-->
-        <configuration>
-          <maxAllowedViolations>0</maxAllowedViolations>
-        </configuration>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-mongodb/pom.xml b/external/storm-mongodb/pom.xml
index e8ef9c7..bf1082f 100644
--- a/external/storm-mongodb/pom.xml
+++ b/external/storm-mongodb/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
diff --git a/external/storm-mqtt/pom.xml b/external/storm-mqtt/pom.xml
index ad9ffcf..bdc001e 100644
--- a/external/storm-mqtt/pom.xml
+++ b/external/storm-mqtt/pom.xml
@@ -25,7 +25,7 @@
     <parent>
         <groupId>org.apache.storm</groupId>
         <artifactId>storm</artifactId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     
@@ -137,9 +137,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-opentsdb/pom.xml b/external/storm-opentsdb/pom.xml
index dd6eca0..e7694ad 100644
--- a/external/storm-opentsdb/pom.xml
+++ b/external/storm-opentsdb/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -35,10 +35,6 @@
         </developer>
     </developers>
 
-    <properties>
-        <jersey.version>2.27</jersey.version>
-    </properties>
-
     <dependencies>
         <dependency>
             <groupId>org.apache.storm</groupId>
@@ -61,32 +57,34 @@
         <dependency>
             <groupId>org.glassfish.jersey.core</groupId>
             <artifactId>jersey-client</artifactId>
-            <version>${jersey.version}</version>
         </dependency>
         <dependency>
             <groupId>org.glassfish.jersey.media</groupId>
             <artifactId>jersey-media-json-jackson</artifactId>
-            <version>${jersey.version}</version>
         </dependency>
         <dependency>
             <groupId>org.apache.httpcomponents</groupId>
             <artifactId>httpclient</artifactId>
-            <version>4.5</version>
         </dependency>
         <dependency>
             <groupId>org.glassfish.jersey.connectors</groupId>
             <artifactId>jersey-apache-connector</artifactId>
-            <version>${jersey.version}</version>
         </dependency>
-        <!-- Java 9+ compatibility, ensure Java EE classes are on classpath when using jersey -->
+        <!-- Extra Java 11 jars for Jersey. Jersey's dependency tree only includes these on Java 11,
+        so we include them manually to ensure that artifacts built on Java 8 also run on Java 11. -->
         <dependency>
-            <groupId>javax.activation</groupId>
-            <artifactId>activation</artifactId>
+            <groupId>com.sun.activation</groupId>
+            <artifactId>jakarta.activation</artifactId>
         </dependency>
         <dependency>
-            <groupId>javax.xml.bind</groupId>
-            <artifactId>jaxb-api</artifactId>
+            <groupId>jakarta.activation</groupId>
+            <artifactId>jakarta.activation-api</artifactId>
         </dependency>
+        <dependency>
+            <groupId>jakarta.xml.bind</groupId>
+            <artifactId>jakarta.xml.bind-api</artifactId>
+        </dependency>
+        <!-- End extra Jersey Java 11 jars -->
 
         <!--test dependencies -->
         <dependency>
@@ -105,9 +103,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-opentsdb/src/main/java/org/apache/storm/opentsdb/bolt/ITupleOpenTsdbDatapointMapper.java b/external/storm-opentsdb/src/main/java/org/apache/storm/opentsdb/bolt/ITupleOpenTsdbDatapointMapper.java
index a89448b..c385bba 100644
--- a/external/storm-opentsdb/src/main/java/org/apache/storm/opentsdb/bolt/ITupleOpenTsdbDatapointMapper.java
+++ b/external/storm-opentsdb/src/main/java/org/apache/storm/opentsdb/bolt/ITupleOpenTsdbDatapointMapper.java
@@ -34,6 +34,6 @@
      *
      * @param tuple tuple instance
      */
-    public OpenTsdbMetricDatapoint getMetricPoint(ITuple tuple);
+    OpenTsdbMetricDatapoint getMetricPoint(ITuple tuple);
 
 }
diff --git a/external/storm-opentsdb/src/main/java/org/apache/storm/opentsdb/client/ClientResponse.java b/external/storm-opentsdb/src/main/java/org/apache/storm/opentsdb/client/ClientResponse.java
index 96acb12..bb51b5c 100644
--- a/external/storm-opentsdb/src/main/java/org/apache/storm/opentsdb/client/ClientResponse.java
+++ b/external/storm-opentsdb/src/main/java/org/apache/storm/opentsdb/client/ClientResponse.java
@@ -30,7 +30,7 @@
 public interface ClientResponse extends Serializable {
 
 
-    public class Summary implements ClientResponse {
+    class Summary implements ClientResponse {
         private int failed;
         private int success;
         private int timeouts;
@@ -95,7 +95,7 @@
         }
     }
 
-    public class Details extends Summary {
+    class Details extends Summary {
         private List<Error> errors;
 
         public Details() {
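
These modifier removals recur throughout this PR and are purely mechanical: interface methods are implicitly public and abstract, types nested in an interface are implicitly public and static, and nested enums are implicitly static, so the dropped keywords change nothing. A minimal sketch (hypothetical names) of the equivalences that the RedundantModifier checkstyle rule, enabled later in this PR, now enforces:

    // Hypothetical interface; each pair of declarations is identical in effect.
    public interface Example {
        public void run();              // 'public' is redundant on interface methods...
        void stop();                    // ...so this shorter form is preferred.

        public static class Summary {}  // nested types are implicitly public static...
        class Details {}                // ...making both modifiers redundant here.

        enum Type { ALL, NONE }         // nested enums are implicitly static as well.
    }
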
diff --git a/external/storm-pmml/pom.xml b/external/storm-pmml/pom.xml
index 0a834ff..5fc677a 100644
--- a/external/storm-pmml/pom.xml
+++ b/external/storm-pmml/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
@@ -92,10 +92,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-redis/pom.xml b/external/storm-redis/pom.xml
index 1938d46..f1cc269 100644
--- a/external/storm-redis/pom.xml
+++ b/external/storm-redis/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -85,9 +85,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/external/storm-redis/src/main/java/org/apache/storm/redis/common/container/JedisCommandsInstanceContainer.java b/external/storm-redis/src/main/java/org/apache/storm/redis/common/container/JedisCommandsInstanceContainer.java
index 82b56cf..49a3373 100644
--- a/external/storm-redis/src/main/java/org/apache/storm/redis/common/container/JedisCommandsInstanceContainer.java
+++ b/external/storm-redis/src/main/java/org/apache/storm/redis/common/container/JedisCommandsInstanceContainer.java
@@ -35,5 +35,5 @@
      * Release Container.
      */
     @Override
-    public void close();
+    void close();
 }
diff --git a/external/storm-rocketmq/pom.xml b/external/storm-rocketmq/pom.xml
index c981c7a..4874208 100644
--- a/external/storm-rocketmq/pom.xml
+++ b/external/storm-rocketmq/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
diff --git a/external/storm-solr/pom.xml b/external/storm-solr/pom.xml
index 5deee1c..135e7e4 100644
--- a/external/storm-solr/pom.xml
+++ b/external/storm-solr/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
@@ -123,9 +123,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/flux/flux-core/pom.xml b/flux/flux-core/pom.xml
index 33b61c8..1734b5c 100644
--- a/flux/flux-core/pom.xml
+++ b/flux/flux-core/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <groupId>org.apache.storm</groupId>
         <artifactId>flux</artifactId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
@@ -116,9 +116,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/flux/flux-core/src/main/java/org/apache/storm/flux/api/TopologySource.java b/flux/flux-core/src/main/java/org/apache/storm/flux/api/TopologySource.java
index 1944c38..a18b650 100644
--- a/flux/flux-core/src/main/java/org/apache/storm/flux/api/TopologySource.java
+++ b/flux/flux-core/src/main/java/org/apache/storm/flux/api/TopologySource.java
@@ -35,5 +35,5 @@
  *
  */
 public interface TopologySource {
-    public StormTopology getTopology(Map<String, Object> config);
+    StormTopology getTopology(Map<String, Object> config);
 }
diff --git a/flux/flux-core/src/main/java/org/apache/storm/flux/model/GroupingDef.java b/flux/flux-core/src/main/java/org/apache/storm/flux/model/GroupingDef.java
index f191e3c..06885f6 100644
--- a/flux/flux-core/src/main/java/org/apache/storm/flux/model/GroupingDef.java
+++ b/flux/flux-core/src/main/java/org/apache/storm/flux/model/GroupingDef.java
@@ -28,7 +28,7 @@
     /**
      * Types of stream groupings Storm allows.
      */
-    public static enum Type {
+    public enum Type {
         ALL,
         CUSTOM,
         DIRECT,
diff --git a/flux/flux-examples/pom.xml b/flux/flux-examples/pom.xml
index 2b7741a..ee212d3 100644
--- a/flux/flux-examples/pom.xml
+++ b/flux/flux-examples/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <groupId>org.apache.storm</groupId>
         <artifactId>flux</artifactId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
@@ -149,9 +149,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/flux/flux-wrappers/pom.xml b/flux/flux-wrappers/pom.xml
index d3131f7..6f37cd3 100644
--- a/flux/flux-wrappers/pom.xml
+++ b/flux/flux-wrappers/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <groupId>org.apache.storm</groupId>
         <artifactId>flux</artifactId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
@@ -54,9 +54,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/flux/pom.xml b/flux/pom.xml
index 99f1ce1..638a086 100644
--- a/flux/pom.xml
+++ b/flux/pom.xml
@@ -26,7 +26,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/integration-test/pom.xml b/integration-test/pom.xml
index e92fc97..65610ba 100755
--- a/integration-test/pom.xml
+++ b/integration-test/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>..</relativePath>
     </parent>
 
@@ -174,9 +174,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/pom.xml b/pom.xml
index 18ad44c..0601a84 100644
--- a/pom.xml
+++ b/pom.xml
@@ -26,7 +26,7 @@
 
     <groupId>org.apache.storm</groupId>
     <artifactId>storm</artifactId>
-    <version>2.0.1-SNAPSHOT</version>
+    <version>2.2.0-SNAPSHOT</version>
     <packaging>pom</packaging>
     <name>Storm</name>
     <description>Distributed and fault-tolerant realtime computation</description>
@@ -240,10 +240,9 @@
     </developers>
 
     <scm>
-        <connection>scm:git:https://git-wip-us.apache.org/repos/asf/storm.git</connection>
-        <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/storm.git</developerConnection>
-        <tag>v2.0.0</tag>
-        <url>https://git-wip-us.apache.org/repos/asf/storm</url>
+        <connection>scm:git:https://gitbox.apache.org/repos/asf/storm.git</connection>
+        <developerConnection>scm:git:https://gitbox.apache.org/repos/asf/storm.git</developerConnection>
+        <url>https://gitbox.apache.org/repos/asf/storm</url>
     </scm>
 
     <issueManagement>
@@ -254,8 +253,11 @@
     <properties>
         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
         <test.extra.args>-Djava.net.preferIPv4Stack=true</test.extra.args>
+        <!-- license-maven-plugin settings that need to be overridable from the command line -->
         <license.skipAggregateAddThirdParty>true</license.skipAggregateAddThirdParty>
         <license.skipAggregateDownloadLicenses>true</license.skipAggregateDownloadLicenses>
+        <license.thirdPartyFilename>DEPENDENCY-LICENSES</license.thirdPartyFilename>
+        <license.outputDirectory>${project.basedir}</license.outputDirectory>
 
         <!-- dependency versions -->
         <clojure.version>1.10.0</clojure.version>
@@ -323,12 +325,15 @@
         <maven-resolver.version>1.3.3</maven-resolver.version>
         <maven.version>3.6.0</maven.version>
         <azure-eventhubs.version>0.13.1</azure-eventhubs.version>
-        <jersey.version>2.27</jersey.version>
+        <jersey.version>2.29</jersey.version>
         <dropwizard.version>1.3.5</dropwizard.version>
         <j2html.version>1.0.0</j2html.version>
         <jool.version>0.9.12</jool.version>
         <caffeine.version>2.3.5</caffeine.version>
+        <jakarta-jaxb-version>2.3.2</jakarta-jaxb-version>
+        <jakarta-activation-version>1.2.1</jakarta-activation-version>
         <jaxb-version>2.3.0</jaxb-version>
+        <activation-version>1.1.1</activation-version>
         <rocksdb-version>5.18.3</rocksdb-version>
 
         <!-- see intellij profile below... This fixes an annoyance with intellij -->
@@ -386,7 +391,6 @@
                     <plugin>
                         <groupId>org.apache.rat</groupId>
                         <artifactId>apache-rat-plugin</artifactId>
-                        <version>0.12</version>
                         <executions>
                             <execution>
                                 <phase>test</phase>
@@ -685,20 +689,37 @@
     <dependencyManagement>
         <dependencies>
             <dependency>
-                <groupId>org.glassfish.jersey.core</groupId>
-                <artifactId>jersey-server</artifactId>
+                <groupId>org.glassfish.jersey</groupId>
+                <artifactId>jersey-bom</artifactId>
                 <version>${jersey.version}</version>
-            </dependency> 
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
             <dependency>
-                <groupId>org.glassfish.jersey.containers</groupId> 
-                <artifactId>jersey-container-servlet-core</artifactId>
-                <version>${jersey.version}</version>
-            </dependency> 
+                <groupId>com.sun.activation</groupId>
+                <artifactId>jakarta.activation</artifactId>
+                <version>${jakarta-activation-version}</version>
+            </dependency>
             <dependency>
-                <groupId>org.glassfish.jersey.containers</groupId> 
-                <artifactId>jersey-container-jetty-http</artifactId>
-                <version>${jersey.version}</version>
-            </dependency> 
+                <groupId>jakarta.activation</groupId>
+                <artifactId>jakarta.activation-api</artifactId>
+                <version>${jakarta-activation-version}</version>
+            </dependency>
+            <dependency>
+                <groupId>jakarta.xml.bind</groupId>
+                <artifactId>jakarta.xml.bind-api</artifactId>
+                <version>${jakarta-jaxb-version}</version>
+            </dependency>
+            <dependency>
+                <groupId>javax.xml.bind</groupId>
+                <artifactId>jaxb-api</artifactId>
+                <version>${jaxb-version}</version>
+            </dependency>
+            <dependency>
+                <groupId>javax.activation</groupId>
+                <artifactId>activation</artifactId>
+                <version>${activation-version}</version>
+            </dependency>
             <dependency>
                 <groupId>org.hdrhistogram</groupId>
                 <artifactId>HdrHistogram</artifactId>
@@ -1006,16 +1027,6 @@
                 <version>${caffeine.version}</version>
             </dependency>
             <dependency>
-                <groupId>javax.xml.bind</groupId>
-                <artifactId>jaxb-api</artifactId>
-                <version>${jaxb-version}</version>
-            </dependency>
-            <dependency>
-                <groupId>javax.activation</groupId>
-                <artifactId>activation</artifactId>
-                <version>1.1.1</version>
-            </dependency>
-            <dependency>
                 <groupId>javax.annotation</groupId>
                 <artifactId>javax.annotation-api</artifactId>
                 <version>1.3.2</version>
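
With the jersey-bom imported above, child modules can drop their per-artifact version elements entirely, which is exactly what the storm-opentsdb pom change earlier in this PR does. A minimal sketch (hypothetical child-module fragment):

    <!-- The version is resolved to ${jersey.version} (2.29) through the imported jersey-bom. -->
    <dependency>
        <groupId>org.glassfish.jersey.core</groupId>
        <artifactId>jersey-client</artifactId>
        <!-- no <version> element needed -->
    </dependency>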
@@ -1219,7 +1230,8 @@
                                 <configLocation>storm/storm_checkstyle.xml</configLocation>
                                 <encoding>UTF-8</encoding>
                                 <failOnViolation>true</failOnViolation>
-                                <logViolationsToConsole>false</logViolationsToConsole>
+                                <logViolationsToConsole>true</logViolationsToConsole>
+                                <consoleOutput>true</consoleOutput>
                                 <outputFile>target/checkstyle-violation.xml</outputFile>
                                 <violationSeverity>warning</violationSeverity>
                             </configuration>
@@ -1292,6 +1304,111 @@
                     <artifactId>exec-maven-plugin</artifactId>
                     <version>1.6.0</version>
                 </plugin>
+                <plugin>
+                    <groupId>org.codehaus.mojo</groupId>
+                    <artifactId>license-maven-plugin</artifactId>
+                    <version>2.0.0</version>
+                    <configuration>
+                        <useMissingFile>true</useMissingFile>
+                        <failOnMissing>true</failOnMissing>
+                        <includeTransitiveDependencies>true</includeTransitiveDependencies>
+                        <fileTemplate>/org/codehaus/mojo/license/third-party-file-groupByMultiLicense.ftl</fileTemplate>
+                        <excludedScopes>system,test</excludedScopes>
+                        <excludedGroups>${project.groupId}</excludedGroups>
+                        <licenseMerges>
+                            <licenseMerge>
+                                Apache License, Version 2.0 |
+                                Apache License, version 2.0 |
+                                Apache License Version 2 |
+                                Apache License Version 2.0 |
+                                Apache License version 2.0 |
+                                Apache 2 |
+                                Apache 2.0 |
+                                Apache License, 2.0 |
+                                Apache License 2 |
+                                Apache License 2.0 |
+                                Apache Public License 2.0 |
+                                Apache Software License - Version 2.0 |
+                                Apache v2 |
+                                ASL, version 2 |
+                                The Apache License, Version 2.0 |
+                                The Apache Software License, Version 2.0
+                            </licenseMerge>
+                            <licenseMerge>
+                                Apache License | 
+                                Apache Software Licenses
+                            </licenseMerge>
+                            <licenseMerge>
+                                BSD License |
+                                BSD license |
+                                BSD |
+                                The BSD License
+                            </licenseMerge>
+                            <licenseMerge>
+                                BSD 3-Clause License |
+                                BSD 3-Clause |
+                                BSD 3-clause |
+                                The BSD 3-Clause License |
+                                New BSD License |
+                                New BSD license
+                            </licenseMerge>
+                            <licenseMerge>
+                                Common Development and Distribution License (CDDL) v1.0 |
+                                COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 |
+                                CDDL 1.0
+                            </licenseMerge>
+                            <licenseMerge>
+                                Common Development and Distribution License (CDDL) v1.1 | 
+                                COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.1 |
+                                CDDL 1.1 |
+                                Common Development and Distribution License (CDDL), Version 1.1
+                            </licenseMerge>
+                            <licenseMerge>
+                                Common Development and Distribution License | 
+                                <!-- Multilicense, choosing CDDL -->
+                                CDDL+GPL |
+                                CDDL+GPL License |
+                                CDDL + GPLv2 with classpath exception
+                            </licenseMerge>
+                            <licenseMerge>
+                                Eclipse Public License, Version 1.0 |
+                                Eclipse Public License 1.0 |
+                                Eclipse Public License - v 1.0
+                            </licenseMerge>
+                            <licenseMerge>
+                                Eclipse Public License, Version 2.0 |
+                                EPL-2.0 |
+                                EPL 2.0
+                            </licenseMerge>
+                            <licenseMerge>
+                                Eclipse Distribution License, Version 1.0 |
+                                Eclipse Distribution License - v 1.0 |
+                                EDL 1.0
+                            </licenseMerge>
+                            <licenseMerge>
+                                MIT License |
+                                The MIT License |
+                                MIT license |
+                                MIT X11 License |
+                                MIT
+                            </licenseMerge>
+                            <licenseMerge>
+                                The GNU General Public License (GPL), Version 2, With Classpath Exception |
+                                GPL2 w/ CPE
+                            </licenseMerge>
+                            <licenseMerge>
+                                GNU Lesser General Public License (LGPL), Version 2.1 |
+                                LGPL, version 2.1 |
+                                GNU Lesser General Public License Version 2.1 |
+                                GNU Lesser General Public License, version 2.1
+                            </licenseMerge>
+                            <licenseMerge>
+                                Common Public License Version 1.0 |
+                                Common Public License - v 1.0
+                            </licenseMerge>
+                        </licenseMerges>
+                    </configuration>
+                </plugin>
             </plugins>
         </pluginManagement>
 
@@ -1356,6 +1473,29 @@
                     </execution>
                 </executions>
             </plugin>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>license-maven-plugin</artifactId>
+                <inherited>false</inherited>
+                <configuration>
+                    <missingFile>${project.basedir}/THIRD-PARTY.properties</missingFile>
+                    <aggregateMissingLicensesFile>${project.basedir}/THIRD-PARTY.properties</aggregateMissingLicensesFile>
+                </configuration>
+                <executions>
+                    <execution>
+                        <id>generate-and-check-licenses</id>
+                        <goals>
+                            <goal>aggregate-add-third-party</goal>
+                        </goals>
+                    </execution>
+                    <execution>
+                        <id>download-licenses</id>
+                        <goals>
+                            <goal>aggregate-download-licenses</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
         </plugins>
     </build>
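
A usage note (the exact invocation is an assumption, not shown in this PR): because license.skipAggregateAddThirdParty and license.skipAggregateDownloadLicenses default to true in the properties above, the two executions bound here are no-ops on a normal build and only run when the skip is overridden on the command line, along the lines of:

    mvn license:aggregate-add-third-party -Dlicense.skipAggregateAddThirdParty=false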
 
diff --git a/sql/pom.xml b/sql/pom.xml
index a3f6178..1c4616e 100644
--- a/sql/pom.xml
+++ b/sql/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/sql/storm-sql-core/pom.xml b/sql/storm-sql-core/pom.xml
index 73636f9..463fbb3 100644
--- a/sql/storm-sql-core/pom.xml
+++ b/sql/storm-sql-core/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -206,9 +206,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlImpl.java b/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlImpl.java
index 2f30063..a91dcae 100644
--- a/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlImpl.java
+++ b/sql/storm-sql-core/src/jvm/org/apache/storm/sql/StormSqlImpl.java
@@ -36,7 +36,7 @@
 class StormSqlImpl extends StormSql {
     private final StormSqlContext sqlContext;
 
-    public StormSqlImpl() {
+    StormSqlImpl() {
         sqlContext = new StormSqlContext();
     }
 
diff --git a/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/RexNodeToJavaCodeCompiler.java b/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/RexNodeToJavaCodeCompiler.java
index 99e557f..ed62271 100644
--- a/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/RexNodeToJavaCodeCompiler.java
+++ b/sql/storm-sql-core/src/jvm/org/apache/storm/sql/compiler/RexNodeToJavaCodeCompiler.java
@@ -202,7 +202,7 @@
         public final Constructor constructor;
         public final Field field;
 
-        private StormBuiltInMethod(Method method, Constructor constructor, Field field) {
+        StormBuiltInMethod(Method method, Constructor constructor, Field field) {
             this.method = method;
             this.constructor = constructor;
             this.field = field;
diff --git a/sql/storm-sql-core/src/jvm/org/apache/storm/sql/planner/streams/StreamsStormRuleSets.java b/sql/storm-sql-core/src/jvm/org/apache/storm/sql/planner/streams/StreamsStormRuleSets.java
index 0fd2ae1..53a17b8 100644
--- a/sql/storm-sql-core/src/jvm/org/apache/storm/sql/planner/streams/StreamsStormRuleSets.java
+++ b/sql/storm-sql-core/src/jvm/org/apache/storm/sql/planner/streams/StreamsStormRuleSets.java
@@ -90,11 +90,11 @@
     private static class StormRuleSet implements RuleSet {
         final ImmutableSet<RelOptRule> rules;
 
-        public StormRuleSet(ImmutableSet<RelOptRule> rules) {
+        StormRuleSet(ImmutableSet<RelOptRule> rules) {
             this.rules = rules;
         }
 
-        public StormRuleSet(ImmutableList<RelOptRule> rules) {
+        StormRuleSet(ImmutableList<RelOptRule> rules) {
             this.rules = ImmutableSet.<RelOptRule>builder()
                 .addAll(rules)
                 .build();
diff --git a/sql/storm-sql-external/storm-sql-hdfs/pom.xml b/sql/storm-sql-external/storm-sql-hdfs/pom.xml
index 83d45af..20fa0e5 100644
--- a/sql/storm-sql-external/storm-sql-hdfs/pom.xml
+++ b/sql/storm-sql-external/storm-sql-hdfs/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../../pom.xml</relativePath>
     </parent>
 
diff --git a/sql/storm-sql-external/storm-sql-kafka/pom.xml b/sql/storm-sql-external/storm-sql-kafka/pom.xml
index 10244d7..f531973 100644
--- a/sql/storm-sql-external/storm-sql-kafka/pom.xml
+++ b/sql/storm-sql-external/storm-sql-kafka/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../../pom.xml</relativePath>
     </parent>
 
diff --git a/sql/storm-sql-external/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/KafkaDataSourcesProvider.java b/sql/storm-sql-external/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/KafkaDataSourcesProvider.java
index 2c22cc8..2af340d 100644
--- a/sql/storm-sql-external/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/KafkaDataSourcesProvider.java
+++ b/sql/storm-sql-external/storm-sql-kafka/src/jvm/org/apache/storm/sql/kafka/KafkaDataSourcesProvider.java
@@ -83,7 +83,7 @@
         private final Properties props;
         private final IOutputSerializer serializer;
 
-        public KafkaStreamsDataSource(KafkaSpoutConfig<ByteBuffer, ByteBuffer> kafkaSpoutConfig, String bootstrapServers,
+        KafkaStreamsDataSource(KafkaSpoutConfig<ByteBuffer, ByteBuffer> kafkaSpoutConfig, String bootstrapServers,
             String topic, Properties props, IOutputSerializer serializer) {
             this.kafkaSpoutConfig = kafkaSpoutConfig;
             this.bootstrapServers = bootstrapServers;
diff --git a/sql/storm-sql-external/storm-sql-mongodb/pom.xml b/sql/storm-sql-external/storm-sql-mongodb/pom.xml
index 69e5196..9d4ec1f 100644
--- a/sql/storm-sql-external/storm-sql-mongodb/pom.xml
+++ b/sql/storm-sql-external/storm-sql-mongodb/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../../pom.xml</relativePath>
     </parent>
 
diff --git a/sql/storm-sql-external/storm-sql-redis/pom.xml b/sql/storm-sql-external/storm-sql-redis/pom.xml
index fd051cb..a9a488e 100644
--- a/sql/storm-sql-external/storm-sql-redis/pom.xml
+++ b/sql/storm-sql-external/storm-sql-redis/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../../pom.xml</relativePath>
     </parent>
 
diff --git a/sql/storm-sql-runtime/pom.xml b/sql/storm-sql-runtime/pom.xml
index 5d709ba..a1bc661 100644
--- a/sql/storm-sql-runtime/pom.xml
+++ b/sql/storm-sql-runtime/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
diff --git a/storm-buildtools/maven-shade-clojure-transformer/pom.xml b/storm-buildtools/maven-shade-clojure-transformer/pom.xml
index 650ad8f..cb65f46 100644
--- a/storm-buildtools/maven-shade-clojure-transformer/pom.xml
+++ b/storm-buildtools/maven-shade-clojure-transformer/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -41,9 +41,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/storm-buildtools/storm-maven-plugins/pom.xml b/storm-buildtools/storm-maven-plugins/pom.xml
index 9a8b946..1dfc781 100644
--- a/storm-buildtools/storm-maven-plugins/pom.xml
+++ b/storm-buildtools/storm-maven-plugins/pom.xml
@@ -21,7 +21,7 @@
   <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
   </parent>
 
@@ -74,9 +74,6 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-checkstyle-plugin</artifactId>
         <!--Note - the version would be inherited-->
-        <configuration>
-          <maxAllowedViolations>0</maxAllowedViolations>
-        </configuration>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
diff --git a/storm-buildtools/storm-maven-plugins/src/main/java/org/apache/storm/maven/plugin/util/CommandExec.java b/storm-buildtools/storm-maven-plugins/src/main/java/org/apache/storm/maven/plugin/util/CommandExec.java
index 8bad082..031bf74 100644
--- a/storm-buildtools/storm-maven-plugins/src/main/java/org/apache/storm/maven/plugin/util/CommandExec.java
+++ b/storm-buildtools/storm-maven-plugins/src/main/java/org/apache/storm/maven/plugin/util/CommandExec.java
@@ -56,7 +56,7 @@
         private List<String> output;
         private BufferedReader reader;
 
-        public OutputBufferThread(InputStream is) {
+        OutputBufferThread(InputStream is) {
             this.setDaemon(true);
             output = new ArrayList<String>();
             reader = new BufferedReader(new InputStreamReader(is));
diff --git a/storm-buildtools/storm-maven-plugins/src/main/java/org/apache/storm/maven/plugin/versioninfo/VersionInfoMojo.java b/storm-buildtools/storm-maven-plugins/src/main/java/org/apache/storm/maven/plugin/versioninfo/VersionInfoMojo.java
index eb1a3ae..1cbefeb 100644
--- a/storm-buildtools/storm-maven-plugins/src/main/java/org/apache/storm/maven/plugin/versioninfo/VersionInfoMojo.java
+++ b/storm-buildtools/storm-maven-plugins/src/main/java/org/apache/storm/maven/plugin/versioninfo/VersionInfoMojo.java
@@ -194,6 +194,8 @@
                     }
                 }
                 break;
+            case NONE:
+                break;
             default:
                 throw new IllegalArgumentException(String.format("SCM %s is not supported",
                         scm));
@@ -221,6 +223,8 @@
                     }
                 }
                 break;
+            case NONE:
+                break;
             default:
                 throw new IllegalArgumentException(String.format("SCM %s is not supported",
                         scm));
@@ -249,6 +253,8 @@
                     }
                 }
                 break;
+            case NONE:
+                break;
             default:
                 throw new IllegalArgumentException(String.format("SCM %s is not supported",
                         scm));
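
A minimal sketch (hypothetical names; the real mojo shells out to the SCM tools) of the pattern these three hunks apply: SCM.NONE becomes an explicit no-op, so building from a plain source tree without SCM metadata no longer falls into the default branch, which stays reserved for genuinely unsupported SCM types:

    public class ScmSwitchSketch {
        enum Scm { GIT, SVN, NONE }

        static String describe(Scm scm) {
            switch (scm) {
                case GIT:
                    return "read branch/revision from git";
                case SVN:
                    return "read revision from svn info";
                case NONE:
                    return "no SCM metadata available"; // the explicit no-op case added above
                default: // guards against future, genuinely unsupported constants
                    throw new IllegalArgumentException(String.format("SCM %s is not supported", scm));
            }
        }

        public static void main(String[] args) {
            System.out.println(describe(Scm.NONE));
        }
    }
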
diff --git a/storm-checkstyle/pom.xml b/storm-checkstyle/pom.xml
index a7d406b..7f2861e 100644
--- a/storm-checkstyle/pom.xml
+++ b/storm-checkstyle/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>..</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
diff --git a/storm-checkstyle/src/main/resources/storm/storm_checkstyle.xml b/storm-checkstyle/src/main/resources/storm/storm_checkstyle.xml
index 4b20a6f..a2a7689 100644
--- a/storm-checkstyle/src/main/resources/storm/storm_checkstyle.xml
+++ b/storm-checkstyle/src/main/resources/storm/storm_checkstyle.xml
@@ -257,5 +257,6 @@
             <property name="exceptionVariableName" value="expected"/>
         </module>
         <module name="CommentsIndentation"/>
+        <module name="RedundantModifier"/>
     </module>
 </module>
diff --git a/storm-client/pom.xml b/storm-client/pom.xml
index 77c2dd9..0ff861c 100644
--- a/storm-client/pom.xml
+++ b/storm-client/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>..</relativePath>
     </parent>
 
@@ -69,14 +69,22 @@
         <!-- Java EE packages. On JDK8 and below this is a part of java,
         but JDK9+ it is not there by default. -->
         <dependency>
-            <groupId>javax.xml.bind</groupId>
-            <artifactId>jaxb-api</artifactId>
+            <groupId>com.sun.activation</groupId>
+            <artifactId>jakarta.activation</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>jakarta.activation</groupId>
+            <artifactId>jakarta.activation-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>jakarta.xml.bind</groupId>
+            <artifactId>jakarta.xml.bind-api</artifactId>
         </dependency>
         <dependency>
             <groupId>javax.annotation</groupId>
             <artifactId>javax.annotation-api</artifactId>
         </dependency>
-
+        
         <!-- kryo -->
         <dependency>
             <groupId>com.esotericsoftware</groupId>
@@ -166,7 +174,6 @@
                 <!--Note - the version would be inherited-->
                 <configuration>
                     <excludes>**/generated/**</excludes>
-                    <maxAllowedViolations>0</maxAllowedViolations>
                 </configuration>
             </plugin>
             <plugin>
@@ -300,6 +307,28 @@
                             </execution>
                         </executions>
                     </plugin>
+                    <plugin>
+                        <groupId>org.apache.storm</groupId>
+                        <artifactId>storm-maven-plugins</artifactId>
+                        <version>${project.version}</version>
+                        <executions>
+                            <execution>
+                                <id>version-info</id>
+                                <phase>generate-resources</phase>
+                                <goals>
+                                    <goal>version-info</goal>
+                                </goals>
+                                <configuration>
+                                    <source>
+                                        <directory>${basedir}/src/</directory>
+                                        <includes>
+                                            <include>jvm/**/*.java</include>
+                                        </includes>
+                                    </source>
+                                </configuration>
+                            </execution>
+                        </executions>
+                    </plugin>
                 </plugins>
             </build>
         </profile>
diff --git a/storm-client/src/jvm/org/apache/storm/Config.java b/storm-client/src/jvm/org/apache/storm/Config.java
index a2aa58a..b46c112 100644
--- a/storm-client/src/jvm/org/apache/storm/Config.java
+++ b/storm-client/src/jvm/org/apache/storm/Config.java
@@ -316,6 +316,12 @@
     @IsPositiveNumber
     public static final String TOPOLOGY_RAS_CONSTRAINT_MAX_STATE_SEARCH = "topology.ras.constraint.max.state.search";
     /**
+     * Whether to limit each worker to one executor. This is useful for debugging topologies to clearly identify workers that
+     * are slow/crashing and for estimating resource requirements and capacity.
+     */
+    @IsBoolean
+    public static final String TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER = "topology.ras.one.executor.per.worker";
+    /**
      * The maximum number of seconds to spend scheduling a topology using the constraint solver.  Null means no limit.
      */
     @IsInteger
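
A hedged usage sketch for the new option added above (topology name and builder wiring are hypothetical; the option is read by the resource-aware scheduler, as the RAS prefix suggests):

    import org.apache.storm.Config;
    import org.apache.storm.StormSubmitter;
    import org.apache.storm.topology.TopologyBuilder;

    public class OneExecutorPerWorkerExample {
        public static void main(String[] args) throws Exception {
            TopologyBuilder builder = new TopologyBuilder();
            // ... spout/bolt wiring elided ...

            Config conf = new Config();
            // Limit each worker to a single executor so a slow or crashing
            // worker maps back to exactly one executor.
            conf.put(Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER, true);

            StormSubmitter.submitTopology("debug-topology", conf, builder.createTopology());
        }
    }
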
@@ -1412,12 +1418,12 @@
     @IsInteger
     public static final String STORM_BLOBSTORE_INPUTSTREAM_BUFFER_SIZE_BYTES = "storm.blobstore.inputstream.buffer.size.bytes";
     /**
-     * What chuck size to use for storm client to upload dependency jars.
+     * What chunk size to use for storm client to upload dependency jars.
      */
     @IsPositiveNumber
     @IsInteger
-    public static final String STORM_BLOBSTORE_DEPENDENCY_JAR_UPLOAD_CHUCK_SIZE_BYTES =
-            "storm.blobstore.dependency.jar.upload.chuck.size.bytes";
+    public static final String STORM_BLOBSTORE_DEPENDENCY_JAR_UPLOAD_CHUNK_SIZE_BYTES =
+            "storm.blobstore.dependency.jar.upload.chunk.size.bytes";
     /**
      * FQCN of a class that implements {@code ISubmitterHook} @see ISubmitterHook for details.
      */
diff --git a/storm-client/src/jvm/org/apache/storm/ICredentialsListener.java b/storm-client/src/jvm/org/apache/storm/ICredentialsListener.java
index 975bbe7..bb25b6a 100644
--- a/storm-client/src/jvm/org/apache/storm/ICredentialsListener.java
+++ b/storm-client/src/jvm/org/apache/storm/ICredentialsListener.java
@@ -23,5 +23,5 @@
      *
      * @param credentials the new credentials, could be null.
      */
-    public void setCredentials(Map<String, String> credentials);
+    void setCredentials(Map<String, String> credentials);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/ISubmitterHook.java b/storm-client/src/jvm/org/apache/storm/ISubmitterHook.java
index 84329b4..b4f9268 100644
--- a/storm-client/src/jvm/org/apache/storm/ISubmitterHook.java
+++ b/storm-client/src/jvm/org/apache/storm/ISubmitterHook.java
@@ -21,5 +21,5 @@
  * class's notify method will be invoked when a topology is successfully submitted via StormSubmitter class.
  */
 public interface ISubmitterHook {
-    public void notify(TopologyInfo topologyInfo, Map<String, Object> topoConf, StormTopology topology) throws IllegalAccessException;
+    void notify(TopologyInfo topologyInfo, Map<String, Object> topoConf, StormTopology topology) throws IllegalAccessException;
 }
diff --git a/storm-client/src/jvm/org/apache/storm/StormSubmitter.java b/storm-client/src/jvm/org/apache/storm/StormSubmitter.java
index ed97e13..9302852 100644
--- a/storm-client/src/jvm/org/apache/storm/StormSubmitter.java
+++ b/storm-client/src/jvm/org/apache/storm/StormSubmitter.java
@@ -536,7 +536,7 @@
          * @param targetFile - destination file
          * @param totalBytes - total number of bytes of the file
          */
-        public void onStart(String srcFile, String targetFile, long totalBytes);
+        void onStart(String srcFile, String targetFile, long totalBytes);
 
         /**
          * called whenever a chunk of bytes is uploaded.
@@ -546,7 +546,7 @@
          * @param bytesUploaded - number of bytes transferred so far
          * @param totalBytes    - total number of bytes of the file
          */
-        public void onProgress(String srcFile, String targetFile, long bytesUploaded, long totalBytes);
+        void onProgress(String srcFile, String targetFile, long bytesUploaded, long totalBytes);
 
         /**
          * called when the file is uploaded.
@@ -555,6 +555,6 @@
          * @param targetFile - destination file
          * @param totalBytes - total number of bytes of the file
          */
-        public void onCompleted(String srcFile, String targetFile, long totalBytes);
+        void onCompleted(String srcFile, String targetFile, long totalBytes);
     }
 }
diff --git a/storm-client/src/jvm/org/apache/storm/blobstore/BlobStoreAclHandler.java b/storm-client/src/jvm/org/apache/storm/blobstore/BlobStoreAclHandler.java
index 2270142..df6ca54 100644
--- a/storm-client/src/jvm/org/apache/storm/blobstore/BlobStoreAclHandler.java
+++ b/storm-client/src/jvm/org/apache/storm/blobstore/BlobStoreAclHandler.java
@@ -363,7 +363,7 @@
         return resultAcl;
     }
 
-    private final List<AccessControl> normalizeSettableAcls(String key, List<AccessControl> acls, Subject who,
+    private List<AccessControl> normalizeSettableAcls(String key, List<AccessControl> acls, Subject who,
                                                             int opMask) {
         List<AccessControl> cleanAcls = removeBadAcls(acls);
         Set<String> userNames = getUserNamesFromSubject(who);
diff --git a/storm-client/src/jvm/org/apache/storm/callback/ZKStateChangedCallback.java b/storm-client/src/jvm/org/apache/storm/callback/ZKStateChangedCallback.java
index 30f9b5d..1a15997 100644
--- a/storm-client/src/jvm/org/apache/storm/callback/ZKStateChangedCallback.java
+++ b/storm-client/src/jvm/org/apache/storm/callback/ZKStateChangedCallback.java
@@ -16,5 +16,5 @@
 
 @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public interface ZKStateChangedCallback {
-    public void changed(Watcher.Event.EventType type, String path);
+    void changed(Watcher.Event.EventType type, String path);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/cluster/IStateStorage.java b/storm-client/src/jvm/org/apache/storm/cluster/IStateStorage.java
index da16ea3..d0b45af 100644
--- a/storm-client/src/jvm/org/apache/storm/cluster/IStateStorage.java
+++ b/storm-client/src/jvm/org/apache/storm/cluster/IStateStorage.java
@@ -192,7 +192,7 @@
      *
      * @param listener A StateStorageListener to handle changing cluster state events.
      */
-    void add_listener(final ConnectionStateListener listener);
+    void add_listener(ConnectionStateListener listener);
 
     /**
      * Force consistency on a path. Any writes committed on the path before this call will be completely propagated when it returns.
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCommonOperation.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCommonOperation.java
index 63802a0..82aa41a 100755
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCommonOperation.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupCommonOperation.java
@@ -22,57 +22,57 @@
      *
      * @param taskid task id of task to add
      */
-    public void addTask(int taskid) throws IOException;
+    void addTask(int taskid) throws IOException;
 
     /**
      * Get a list of task ids running in CGroup.
      */
-    public Set<Integer> getTasks() throws IOException;
+    Set<Integer> getTasks() throws IOException;
 
     /**
      * add a process to cgroup.
      *
      * @param pid the PID of the process to add
      */
-    public void addProcs(int pid) throws IOException;
+    void addProcs(int pid) throws IOException;
 
     /**
      * get the PIDs of processes running in cgroup.
      */
-    public Set<Long> getPids() throws IOException;
+    Set<Long> getPids() throws IOException;
 
     /**
      * to get the notify_on_release config.
      */
-    public boolean getNotifyOnRelease() throws IOException;
+    boolean getNotifyOnRelease() throws IOException;
 
     /**
      * to set notify_on_release config in cgroup.
      */
-    public void setNotifyOnRelease(boolean flag) throws IOException;
+    void setNotifyOnRelease(boolean flag) throws IOException;
 
     /**
      * get the command for the release agent to execute.
      */
-    public String getReleaseAgent() throws IOException;
+    String getReleaseAgent() throws IOException;
 
     /**
      * set a command for the release agent to execute.
      */
-    public void setReleaseAgent(String command) throws IOException;
+    void setReleaseAgent(String command) throws IOException;
 
     /**
      * get the cgroup.clone_children config.
      */
-    public boolean getCgroupCloneChildren() throws IOException;
+    boolean getCgroupCloneChildren() throws IOException;
 
     /**
      * Set the cgroup.clone_children config.
      */
-    public void setCgroupCloneChildren(boolean flag) throws IOException;
+    void setCgroupCloneChildren(boolean flag) throws IOException;
 
     /**
      * set event control config.
      */
-    public void setEventControl(String eventFd, String controlFd, String... args) throws IOException;
+    void setEventControl(String eventFd, String controlFd, String... args) throws IOException;
 }
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupOperation.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupOperation.java
index 6387fe3..7897057 100755
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupOperation.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/CgroupOperation.java
@@ -25,50 +25,50 @@
     /**
      * Get a list of hierarchies.
      */
-    public List<Hierarchy> getHierarchies();
+    List<Hierarchy> getHierarchies();
 
     /**
      * get a list of available subsystems.
      */
-    public Set<SubSystem> getSubSystems();
+    Set<SubSystem> getSubSystems();
 
     /**
      * Check if a subsystem is enabled.
      */
-    public boolean isSubSystemEnabled(SubSystemType subsystem);
+    boolean isSubSystemEnabled(SubSystemType subsystem);
 
     /**
      * get the first hierarchy that has a certain subsystem mounted.
      */
-    public Hierarchy getHierarchyWithSubSystem(SubSystemType subsystem);
+    Hierarchy getHierarchyWithSubSystem(SubSystemType subsystem);
 
     /**
      * get the first hierarchy that has a certain list of subsystems mounted.
      */
-    public Hierarchy getHierarchyWithSubSystems(List<SubSystemType> subSystems);
+    Hierarchy getHierarchyWithSubSystems(List<SubSystemType> subSystems);
 
     /**
      * check if a hierarchy is mounted.
      */
-    public boolean isMounted(Hierarchy hierarchy);
+    boolean isMounted(Hierarchy hierarchy);
 
     /**
      * mount a hierarchy.
      */
-    public void mount(Hierarchy hierarchy) throws IOException;
+    void mount(Hierarchy hierarchy) throws IOException;
 
     /**
      * umount a hierarchy.
      */
-    public void umount(Hierarchy hierarchy) throws IOException;
+    void umount(Hierarchy hierarchy) throws IOException;
 
     /**
      * create a cgroup.
      */
-    public void createCgroup(CgroupCommon cgroup) throws SecurityException;
+    void createCgroup(CgroupCommon cgroup) throws SecurityException;
 
     /**
      * delete a cgroup.
      */
-    public void deleteCgroup(CgroupCommon cgroup) throws IOException;
+    void deleteCgroup(CgroupCommon cgroup) throws IOException;
 }
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/core/CgroupCore.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/core/CgroupCore.java
index de58226..229f047 100755
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/core/CgroupCore.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/core/CgroupCore.java
@@ -16,6 +16,6 @@
 
 public interface CgroupCore {
 
-    public SubSystemType getType();
+    SubSystemType getType();
 
 }
diff --git a/storm-client/src/jvm/org/apache/storm/container/cgroup/core/CpuacctCore.java b/storm-client/src/jvm/org/apache/storm/container/cgroup/core/CpuacctCore.java
index 0370721..09bd0e5 100755
--- a/storm-client/src/jvm/org/apache/storm/container/cgroup/core/CpuacctCore.java
+++ b/storm-client/src/jvm/org/apache/storm/container/cgroup/core/CpuacctCore.java
@@ -58,7 +58,7 @@
         return result;
     }
 
-    public static enum StatType {
+    public enum StatType {
         user, system;
     }
 
diff --git a/storm-client/src/jvm/org/apache/storm/coordination/BatchSubtopologyBuilder.java b/storm-client/src/jvm/org/apache/storm/coordination/BatchSubtopologyBuilder.java
index 222edcf..734be36 100644
--- a/storm-client/src/jvm/org/apache/storm/coordination/BatchSubtopologyBuilder.java
+++ b/storm-client/src/jvm/org/apache/storm/coordination/BatchSubtopologyBuilder.java
@@ -141,7 +141,7 @@
         public final Map<String, Object> componentConf = new HashMap<>();
         public final Set<SharedMemory> sharedMemory = new HashSet<>();
 
-        public Component(IRichBolt bolt, Integer parallelism) {
+        Component(IRichBolt bolt, Integer parallelism) {
             this.bolt = bolt;
             this.parallelism = parallelism;
         }
@@ -150,7 +150,7 @@
     private static class BoltDeclarerImpl extends BaseConfigurationDeclarer<BoltDeclarer> implements BoltDeclarer {
         Component component;
 
-        public BoltDeclarerImpl(Component component) {
+        BoltDeclarerImpl(Component component) {
             this.component = component;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/coordination/CoordinatedBolt.java b/storm-client/src/jvm/org/apache/storm/coordination/CoordinatedBolt.java
index 18a14db..9e763f3 100644
--- a/storm-client/src/jvm/org/apache/storm/coordination/CoordinatedBolt.java
+++ b/storm-client/src/jvm/org/apache/storm/coordination/CoordinatedBolt.java
@@ -230,17 +230,17 @@
         }
     }
 
-    static enum TupleType {
+    enum TupleType {
         REGULAR,
         ID,
         COORD
     }
 
-    public static interface FinishedCallback {
+    public interface FinishedCallback {
         void finishedId(Object id);
     }
 
-    public static interface TimeoutCallback {
+    public interface TimeoutCallback {
         void timeoutId(Object id);
     }
 
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/DaemonCommon.java b/storm-client/src/jvm/org/apache/storm/daemon/DaemonCommon.java
index 4679177..d4f9376 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/DaemonCommon.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/DaemonCommon.java
@@ -13,5 +13,5 @@
 package org.apache.storm.daemon;
 
 public interface DaemonCommon {
-    public boolean isWaiting();
+    boolean isWaiting();
 }
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/Shutdownable.java b/storm-client/src/jvm/org/apache/storm/daemon/Shutdownable.java
index 5593676..4d9517b 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/Shutdownable.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/Shutdownable.java
@@ -13,5 +13,5 @@
 package org.apache.storm.daemon;
 
 public interface Shutdownable {
-    public void shutdown();
+    void shutdown();
 }
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/AdvancedFSOps.java b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/AdvancedFSOps.java
index 814ec12..d2a9ced 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/AdvancedFSOps.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/AdvancedFSOps.java
@@ -97,6 +97,18 @@
     }
 
     /**
+     * Moves a file to a given destination.
+     *
+     * @param fromFile file to move
+     * @param toFile where to move it
+     * @throws IOException on any error
+     */
+    @Override
+    public void moveFile(File fromFile, File toFile) throws IOException {
+        Files.move(fromFile.toPath(), toFile.toPath());
+    }
+
+    /**
      * Check whether supports atomic directory move.
      * @return true if an atomic directory move works, else false
      */
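
One design note on the implementation above, with a minimal sketch (hypothetical file names): Files.move with no copy options throws FileAlreadyExistsException when the target exists and makes no atomicity guarantee, so callers should treat the new moveFile as a plain, non-overwriting move:

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;

    public class MoveFileSketch {
        public static void main(String[] args) throws IOException {
            File from = new File("worker.yaml.tmp");
            File to = new File("worker.yaml");
            // Same call as AdvancedFSOps.moveFile above: fails with
            // FileAlreadyExistsException if 'to' exists and with
            // NoSuchFileException if 'from' does not.
            Files.move(from.toPath(), to.toPath());
        }
    }
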
@@ -357,7 +369,7 @@
     private static class AdvancedRunAsUserFSOps extends AdvancedFSOps {
         private final Map<String, Object> conf;
 
-        public AdvancedRunAsUserFSOps(Map<String, Object> conf) {
+        AdvancedRunAsUserFSOps(Map<String, Object> conf) {
             super(conf);
             if (Utils.isOnWindows()) {
                 throw new UnsupportedOperationException("ERROR: Windows doesn't support running workers as different users yet");
@@ -418,7 +430,7 @@
     @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     private static class AdvancedWindowsFSOps extends AdvancedFSOps {
 
-        public AdvancedWindowsFSOps(Map<String, Object> conf) {
+        AdvancedWindowsFSOps(Map<String, Object> conf) {
             super(conf);
             if (ObjectReader.getBoolean(conf.get(Config.SUPERVISOR_RUN_WORKER_AS_USER), false)) {
                 throw new RuntimeException("ERROR: Windows doesn't support running workers as different users yet");
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/ExitCodeCallback.java b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/ExitCodeCallback.java
index de42895..54bf7fa 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/ExitCodeCallback.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/ExitCodeCallback.java
@@ -22,5 +22,5 @@
      *
      * @param exitCode the exit code of the finished process.
      */
-    public void call(int exitCode);
+    void call(int exitCode);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/IAdvancedFSOps.java b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/IAdvancedFSOps.java
index f55ba7f..ccb4a1b 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/supervisor/IAdvancedFSOps.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/supervisor/IAdvancedFSOps.java
@@ -46,6 +46,16 @@
      */
     void moveDirectoryPreferAtomic(File fromDir, File toDir) throws IOException;
 
+
+    /**
+     * Moves a file to a given destination.
+     *
+     * @param fromFile file to move
+     * @param toFile where to move it
+     * @throws IOException on any error
+     */
+    void moveFile(File fromFile, File toFile) throws IOException;
+
     /**
      * Check whether supports atomic directory move.
      * @return true if an atomic directory move works, else false
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/worker/BackPressureTracker.java b/storm-client/src/jvm/org/apache/storm/daemon/worker/BackPressureTracker.java
index 5800a00..dae5cca 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/worker/BackPressureTracker.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/worker/BackPressureTracker.java
@@ -101,7 +101,7 @@
        // No task is under backpressure initially
         private final AtomicBoolean backpressure = new AtomicBoolean(false);
 
-        public BackpressureState(JCQueue queue) {
+        BackpressureState(JCQueue queue) {
             this.queue = queue;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/daemon/worker/WorkerTransfer.java b/storm-client/src/jvm/org/apache/storm/daemon/worker/WorkerTransfer.java
index dac4283..88f5921 100644
--- a/storm-client/src/jvm/org/apache/storm/daemon/worker/WorkerTransfer.java
+++ b/storm-client/src/jvm/org/apache/storm/daemon/worker/WorkerTransfer.java
@@ -49,7 +49,7 @@
 
     private final AtomicBoolean[] remoteBackPressureStatus; // [remoteTaskId] -> true/false : indicates if the remote task is under backpressure.
 
-    public WorkerTransfer(WorkerState workerState, Map<String, Object> topologyConf, int maxTaskIdInTopo) {
+    WorkerTransfer(WorkerState workerState, Map<String, Object> topologyConf, int maxTaskIdInTopo) {
         this.workerState = workerState;
         this.backPressureWaitStrategy = IWaitStrategy.createBackPressureWaitStrategy(topologyConf);
         this.drainer = new TransferDrainer();
diff --git a/storm-client/src/jvm/org/apache/storm/dependency/DependencyUploader.java b/storm-client/src/jvm/org/apache/storm/dependency/DependencyUploader.java
index 1f6b4f4..85ab835 100644
--- a/storm-client/src/jvm/org/apache/storm/dependency/DependencyUploader.java
+++ b/storm-client/src/jvm/org/apache/storm/dependency/DependencyUploader.java
@@ -48,11 +48,11 @@
 
     private final Map<String, Object> conf;
     private ClientBlobStore blobStore;
-    private int uploadChuckSize;
+    private final int uploadChunkSize;
 
     public DependencyUploader() {
         conf = Utils.readStormConfig();
-        this.uploadChuckSize = ObjectReader.getInt(conf.get(Config.STORM_BLOBSTORE_DEPENDENCY_JAR_UPLOAD_CHUCK_SIZE_BYTES), 1024 * 1024);
+        this.uploadChunkSize = ObjectReader.getInt(conf.get(Config.STORM_BLOBSTORE_DEPENDENCY_JAR_UPLOAD_CHUNK_SIZE_BYTES), 1024 * 1024);
     }
 
     public void init() {
@@ -164,7 +164,7 @@
             try {
                 blob = getBlobStore().createBlob(key, new SettableBlobMeta(acls));
                 try (InputStream in = Files.newInputStream(dependency.toPath())) {
-                    IOUtils.copy(in, blob, this.uploadChuckSize);
+                    IOUtils.copy(in, blob, this.uploadChunkSize);
                 }
                 blob.close();
                 blob = null;
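
The renamed uploadChunkSize controls the buffer handed to IOUtils.copy, which streams the dependency jar into the blob store in chunks of that size (defaulting to 1024 * 1024 bytes per the constructor above). A rough sketch of the equivalent hand-rolled loop, for illustration only:

    // What IOUtils.copy(in, blob, uploadChunkSize) amounts to: read and
    // write in uploadChunkSize-byte chunks rather than buffering the
    // whole jar in memory at once.
    byte[] buffer = new byte[uploadChunkSize];
    int n;
    while ((n = in.read(buffer)) != -1) {
        blob.write(buffer, 0, n);
    }
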
diff --git a/storm-client/src/jvm/org/apache/storm/drpc/DRPCSpout.java b/storm-client/src/jvm/org/apache/storm/drpc/DRPCSpout.java
index 8756fca..4c35c67 100644
--- a/storm-client/src/jvm/org/apache/storm/drpc/DRPCSpout.java
+++ b/storm-client/src/jvm/org/apache/storm/drpc/DRPCSpout.java
@@ -258,7 +258,7 @@
         String id;
         int index;
 
-        public DRPCMessageId(String id, int index) {
+        DRPCMessageId(String id, int index) {
             this.id = id;
             this.index = index;
         }
@@ -269,7 +269,7 @@
         private int port;
         private Map<String, Object> conf;
 
-        public Adder(String server, int port, Map<String, Object> conf) {
+        Adder(String server, int port, Map<String, Object> conf) {
             this.server = server;
             this.port = port;
             this.conf = conf;
diff --git a/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCInputDeclarer.java b/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCInputDeclarer.java
index d026ec2..af82d04 100644
--- a/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCInputDeclarer.java
+++ b/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCInputDeclarer.java
@@ -18,40 +18,40 @@
 
 @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public interface LinearDRPCInputDeclarer extends ComponentConfigurationDeclarer<LinearDRPCInputDeclarer> {
-    public LinearDRPCInputDeclarer fieldsGrouping(Fields fields);
+    LinearDRPCInputDeclarer fieldsGrouping(Fields fields);
 
-    public LinearDRPCInputDeclarer fieldsGrouping(String streamId, Fields fields);
+    LinearDRPCInputDeclarer fieldsGrouping(String streamId, Fields fields);
 
-    public LinearDRPCInputDeclarer globalGrouping();
+    LinearDRPCInputDeclarer globalGrouping();
 
-    public LinearDRPCInputDeclarer globalGrouping(String streamId);
+    LinearDRPCInputDeclarer globalGrouping(String streamId);
 
-    public LinearDRPCInputDeclarer shuffleGrouping();
+    LinearDRPCInputDeclarer shuffleGrouping();
 
-    public LinearDRPCInputDeclarer shuffleGrouping(String streamId);
+    LinearDRPCInputDeclarer shuffleGrouping(String streamId);
 
-    public LinearDRPCInputDeclarer localOrShuffleGrouping();
+    LinearDRPCInputDeclarer localOrShuffleGrouping();
 
-    public LinearDRPCInputDeclarer localOrShuffleGrouping(String streamId);
+    LinearDRPCInputDeclarer localOrShuffleGrouping(String streamId);
 
-    public LinearDRPCInputDeclarer noneGrouping();
+    LinearDRPCInputDeclarer noneGrouping();
 
-    public LinearDRPCInputDeclarer noneGrouping(String streamId);
+    LinearDRPCInputDeclarer noneGrouping(String streamId);
 
-    public LinearDRPCInputDeclarer allGrouping();
+    LinearDRPCInputDeclarer allGrouping();
 
-    public LinearDRPCInputDeclarer allGrouping(String streamId);
+    LinearDRPCInputDeclarer allGrouping(String streamId);
 
-    public LinearDRPCInputDeclarer directGrouping();
+    LinearDRPCInputDeclarer directGrouping();
 
-    public LinearDRPCInputDeclarer directGrouping(String streamId);
+    LinearDRPCInputDeclarer directGrouping(String streamId);
 
-    public LinearDRPCInputDeclarer partialKeyGrouping(Fields fields);
+    LinearDRPCInputDeclarer partialKeyGrouping(Fields fields);
 
-    public LinearDRPCInputDeclarer partialKeyGrouping(String streamId, Fields fields);
+    LinearDRPCInputDeclarer partialKeyGrouping(String streamId, Fields fields);
 
-    public LinearDRPCInputDeclarer customGrouping(CustomStreamGrouping grouping);
+    LinearDRPCInputDeclarer customGrouping(CustomStreamGrouping grouping);
 
-    public LinearDRPCInputDeclarer customGrouping(String streamId, CustomStreamGrouping grouping);
+    LinearDRPCInputDeclarer customGrouping(String streamId, CustomStreamGrouping grouping);
 
 }
diff --git a/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCTopologyBuilder.java b/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCTopologyBuilder.java
index 6512976..3092f9a 100644
--- a/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCTopologyBuilder.java
+++ b/storm-client/src/jvm/org/apache/storm/drpc/LinearDRPCTopologyBuilder.java
@@ -174,8 +174,8 @@
         return builder.createTopology();
     }
 
-    private static interface InputDeclaration {
-        public void declare(String prevComponent, InputDeclarer declarer);
+    private interface InputDeclaration {
+        void declare(String prevComponent, InputDeclarer declarer);
     }
 
     private static class Component {
@@ -185,7 +185,7 @@
         public final List<InputDeclaration> declarations = new ArrayList<>();
         public final Set<SharedMemory> sharedMemory = new HashSet<>();
 
-        public Component(IRichBolt bolt, int parallelism) {
+        Component(IRichBolt bolt, int parallelism) {
             this.bolt = bolt;
             this.parallelism = parallelism;
         }
@@ -194,7 +194,7 @@
     private static class InputDeclarerImpl extends BaseConfigurationDeclarer<LinearDRPCInputDeclarer> implements LinearDRPCInputDeclarer {
         Component component;
 
-        public InputDeclarerImpl(Component component) {
+        InputDeclarerImpl(Component component) {
             this.component = component;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/generated/SupervisorSummary.java b/storm-client/src/jvm/org/apache/storm/generated/SupervisorSummary.java
index 8cb28ec..3e39bad 100644
--- a/storm-client/src/jvm/org/apache/storm/generated/SupervisorSummary.java
+++ b/storm-client/src/jvm/org/apache/storm/generated/SupervisorSummary.java
@@ -39,6 +39,7 @@
   private static final org.apache.storm.thrift.protocol.TField USED_CPU_FIELD_DESC = new org.apache.storm.thrift.protocol.TField("used_cpu", org.apache.storm.thrift.protocol.TType.DOUBLE, (short)9);
   private static final org.apache.storm.thrift.protocol.TField FRAGMENTED_MEM_FIELD_DESC = new org.apache.storm.thrift.protocol.TField("fragmented_mem", org.apache.storm.thrift.protocol.TType.DOUBLE, (short)10);
   private static final org.apache.storm.thrift.protocol.TField FRAGMENTED_CPU_FIELD_DESC = new org.apache.storm.thrift.protocol.TField("fragmented_cpu", org.apache.storm.thrift.protocol.TType.DOUBLE, (short)11);
+  private static final org.apache.storm.thrift.protocol.TField BLACKLISTED_FIELD_DESC = new org.apache.storm.thrift.protocol.TField("blacklisted", org.apache.storm.thrift.protocol.TType.BOOL, (short)12);
 
   private static final org.apache.storm.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new SupervisorSummaryStandardSchemeFactory();
   private static final org.apache.storm.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new SupervisorSummaryTupleSchemeFactory();
@@ -54,6 +55,7 @@
   private double used_cpu; // optional
   private double fragmented_mem; // optional
   private double fragmented_cpu; // optional
+  private boolean blacklisted; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.storm.thrift.TFieldIdEnum {
@@ -67,7 +69,8 @@
     USED_MEM((short)8, "used_mem"),
     USED_CPU((short)9, "used_cpu"),
     FRAGMENTED_MEM((short)10, "fragmented_mem"),
-    FRAGMENTED_CPU((short)11, "fragmented_cpu");
+    FRAGMENTED_CPU((short)11, "fragmented_cpu"),
+    BLACKLISTED((short)12, "blacklisted");
 
     private static final java.util.Map<java.lang.String, _Fields> byName = new java.util.HashMap<java.lang.String, _Fields>();
 
@@ -105,6 +108,8 @@
           return FRAGMENTED_MEM;
         case 11: // FRAGMENTED_CPU
           return FRAGMENTED_CPU;
+        case 12: // BLACKLISTED
+          return BLACKLISTED;
         default:
           return null;
       }
@@ -153,8 +158,9 @@
   private static final int __USED_CPU_ISSET_ID = 4;
   private static final int __FRAGMENTED_MEM_ISSET_ID = 5;
   private static final int __FRAGMENTED_CPU_ISSET_ID = 6;
+  private static final int __BLACKLISTED_ISSET_ID = 7;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.VERSION,_Fields.TOTAL_RESOURCES,_Fields.USED_MEM,_Fields.USED_CPU,_Fields.FRAGMENTED_MEM,_Fields.FRAGMENTED_CPU};
+  private static final _Fields optionals[] = {_Fields.VERSION,_Fields.TOTAL_RESOURCES,_Fields.USED_MEM,_Fields.USED_CPU,_Fields.FRAGMENTED_MEM,_Fields.FRAGMENTED_CPU,_Fields.BLACKLISTED};
   public static final java.util.Map<_Fields, org.apache.storm.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     java.util.Map<_Fields, org.apache.storm.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.storm.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -182,6 +188,8 @@
         new org.apache.storm.thrift.meta_data.FieldValueMetaData(org.apache.storm.thrift.protocol.TType.DOUBLE)));
     tmpMap.put(_Fields.FRAGMENTED_CPU, new org.apache.storm.thrift.meta_data.FieldMetaData("fragmented_cpu", org.apache.storm.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.storm.thrift.meta_data.FieldValueMetaData(org.apache.storm.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.BLACKLISTED, new org.apache.storm.thrift.meta_data.FieldMetaData("blacklisted", org.apache.storm.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.storm.thrift.meta_data.FieldValueMetaData(org.apache.storm.thrift.protocol.TType.BOOL)));
     metaDataMap = java.util.Collections.unmodifiableMap(tmpMap);
     org.apache.storm.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SupervisorSummary.class, metaDataMap);
   }
@@ -234,6 +242,7 @@
     this.used_cpu = other.used_cpu;
     this.fragmented_mem = other.fragmented_mem;
     this.fragmented_cpu = other.fragmented_cpu;
+    this.blacklisted = other.blacklisted;
   }
 
   public SupervisorSummary deepCopy() {
@@ -261,6 +270,8 @@
     this.fragmented_mem = 0.0;
     set_fragmented_cpu_isSet(false);
     this.fragmented_cpu = 0.0;
+    set_blacklisted_isSet(false);
+    this.blacklisted = false;
   }
 
   @org.apache.storm.thrift.annotation.Nullable
@@ -524,6 +535,28 @@
     __isset_bitfield = org.apache.storm.thrift.EncodingUtils.setBit(__isset_bitfield, __FRAGMENTED_CPU_ISSET_ID, value);
   }
 
+  public boolean is_blacklisted() {
+    return this.blacklisted;
+  }
+
+  public void set_blacklisted(boolean blacklisted) {
+    this.blacklisted = blacklisted;
+    set_blacklisted_isSet(true);
+  }
+
+  public void unset_blacklisted() {
+    __isset_bitfield = org.apache.storm.thrift.EncodingUtils.clearBit(__isset_bitfield, __BLACKLISTED_ISSET_ID);
+  }
+
+  /** Returns true if field blacklisted is set (has been assigned a value) and false otherwise */
+  public boolean is_set_blacklisted() {
+    return org.apache.storm.thrift.EncodingUtils.testBit(__isset_bitfield, __BLACKLISTED_ISSET_ID);
+  }
+
+  public void set_blacklisted_isSet(boolean value) {
+    __isset_bitfield = org.apache.storm.thrift.EncodingUtils.setBit(__isset_bitfield, __BLACKLISTED_ISSET_ID, value);
+  }
+
   public void setFieldValue(_Fields field, @org.apache.storm.thrift.annotation.Nullable java.lang.Object value) {
     switch (field) {
     case HOST:
@@ -614,6 +647,14 @@
       }
       break;
 
+    case BLACKLISTED:
+      if (value == null) {
+        unset_blacklisted();
+      } else {
+        set_blacklisted((java.lang.Boolean)value);
+      }
+      break;
+
     }
   }
 
@@ -653,6 +694,9 @@
     case FRAGMENTED_CPU:
       return get_fragmented_cpu();
 
+    case BLACKLISTED:
+      return is_blacklisted();
+
     }
     throw new java.lang.IllegalStateException();
   }
@@ -686,6 +730,8 @@
       return is_set_fragmented_mem();
     case FRAGMENTED_CPU:
       return is_set_fragmented_cpu();
+    case BLACKLISTED:
+      return is_set_blacklisted();
     }
     throw new java.lang.IllegalStateException();
   }
@@ -804,6 +850,15 @@
         return false;
     }
 
+    boolean this_present_blacklisted = true && this.is_set_blacklisted();
+    boolean that_present_blacklisted = true && that.is_set_blacklisted();
+    if (this_present_blacklisted || that_present_blacklisted) {
+      if (!(this_present_blacklisted && that_present_blacklisted))
+        return false;
+      if (this.blacklisted != that.blacklisted)
+        return false;
+    }
+
     return true;
   }
 
@@ -849,6 +904,10 @@
     if (is_set_fragmented_cpu())
       hashCode = hashCode * 8191 + org.apache.storm.thrift.TBaseHelper.hashCode(fragmented_cpu);
 
+    hashCode = hashCode * 8191 + ((is_set_blacklisted()) ? 131071 : 524287);
+    if (is_set_blacklisted())
+      hashCode = hashCode * 8191 + ((blacklisted) ? 131071 : 524287);
+
     return hashCode;
   }
 
@@ -970,6 +1029,16 @@
         return lastComparison;
       }
     }
+    lastComparison = java.lang.Boolean.valueOf(is_set_blacklisted()).compareTo(other.is_set_blacklisted());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (is_set_blacklisted()) {
+      lastComparison = org.apache.storm.thrift.TBaseHelper.compareTo(this.blacklisted, other.blacklisted);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -1062,6 +1131,12 @@
       sb.append(this.fragmented_cpu);
       first = false;
     }
+    if (is_set_blacklisted()) {
+      if (!first) sb.append(", ");
+      sb.append("blacklisted:");
+      sb.append(this.blacklisted);
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -1227,6 +1302,14 @@
               org.apache.storm.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 12: // BLACKLISTED
+            if (schemeField.type == org.apache.storm.thrift.protocol.TType.BOOL) {
+              struct.blacklisted = iprot.readBool();
+              struct.set_blacklisted_isSet(true);
+            } else { 
+              org.apache.storm.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.storm.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -1301,6 +1384,11 @@
         oprot.writeDouble(struct.fragmented_cpu);
         oprot.writeFieldEnd();
       }
+      if (struct.is_set_blacklisted()) {
+        oprot.writeFieldBegin(BLACKLISTED_FIELD_DESC);
+        oprot.writeBool(struct.blacklisted);
+        oprot.writeFieldEnd();
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -1342,7 +1430,10 @@
       if (struct.is_set_fragmented_cpu()) {
         optionals.set(5);
       }
-      oprot.writeBitSet(optionals, 6);
+      if (struct.is_set_blacklisted()) {
+        optionals.set(6);
+      }
+      oprot.writeBitSet(optionals, 7);
       if (struct.is_set_version()) {
         oprot.writeString(struct.version);
       }
@@ -1368,6 +1459,9 @@
       if (struct.is_set_fragmented_cpu()) {
         oprot.writeDouble(struct.fragmented_cpu);
       }
+      if (struct.is_set_blacklisted()) {
+        oprot.writeBool(struct.blacklisted);
+      }
     }
 
     @Override
@@ -1383,7 +1477,7 @@
       struct.set_num_used_workers_isSet(true);
       struct.supervisor_id = iprot.readString();
       struct.set_supervisor_id_isSet(true);
-      java.util.BitSet incoming = iprot.readBitSet(6);
+      java.util.BitSet incoming = iprot.readBitSet(7);
       if (incoming.get(0)) {
         struct.version = iprot.readString();
         struct.set_version_isSet(true);
@@ -1419,6 +1513,10 @@
         struct.fragmented_cpu = iprot.readDouble();
         struct.set_fragmented_cpu_isSet(true);
       }
+      if (incoming.get(6)) {
+        struct.blacklisted = iprot.readBool();
+        struct.set_blacklisted_isSet(true);
+      }
     }
   }
 
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/ConnectionWithStatus.java b/storm-client/src/jvm/org/apache/storm/messaging/ConnectionWithStatus.java
index da20253..f682335 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/ConnectionWithStatus.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/ConnectionWithStatus.java
@@ -19,7 +19,7 @@
      */
     public abstract Status status();
 
-    public static enum Status {
+    public enum Status {
 
         /**
         * we are establishing an active connection with the target host. New send requests can be buffered for future sending, or
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/IConnectionCallback.java b/storm-client/src/jvm/org/apache/storm/messaging/IConnectionCallback.java
index 5719eb8..4ccda6d 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/IConnectionCallback.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/IConnectionCallback.java
@@ -23,5 +23,5 @@
      *
      * @param batch the messages to be processed
      */
-    public void recv(List<TaskMessage> batch);
+    void recv(List<TaskMessage> batch);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/local/Context.java b/storm-client/src/jvm/org/apache/storm/messaging/local/Context.java
index 69552f8..67ab6dd 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/local/Context.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/local/Context.java
@@ -79,7 +79,7 @@
         final int port;
         final IConnectionCallback cb;
 
-        public LocalServer(int port, IConnectionCallback cb) {
+        LocalServer(int port, IConnectionCallback cb) {
             this.port = port;
             this.cb = cb;
         }
@@ -129,7 +129,7 @@
         private final int port;
         private final String registryKey;
 
-        public LocalClient(String stormId, int port) {
+        LocalClient(String stormId, int port) {
             this.port = port;
             this.registryKey = getNodeKey(stormId, port);
             pendingDueToUnregisteredServer = new LinkedBlockingQueue<>();
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/Client.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/Client.java
index eac0f22..5819924 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/Client.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/Client.java
@@ -527,7 +527,7 @@
 
         private final InetSocketAddress address;
 
-        public Connect(InetSocketAddress address) {
+        Connect(InetSocketAddress address) {
             this.address = address;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/ControlMessage.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/ControlMessage.java
index beb4515..5bda0e7 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/ControlMessage.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/ControlMessage.java
@@ -25,8 +25,7 @@
 
     private final short code;
 
-    //private constructor
-    private ControlMessage(short code) {
+    ControlMessage(short code) {
         this.code = code;
     }
 
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyServer.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyServer.java
index e7f0c14..ee410d6 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyServer.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/KerberosSaslNettyServer.java
@@ -168,7 +168,7 @@
          */
         private List<String> authorizedUsers;
 
-        public KerberosSaslCallbackHandler(List<String> authorizedUsers) {
+        KerberosSaslCallbackHandler(List<String> authorizedUsers) {
             LOG.debug("KerberosSaslCallback: Creating KerberosSaslCallback handler.");
             this.authorizedUsers = authorizedUsers;
         }
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyClient.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyClient.java
index fe4de08..0af993a 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyClient.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyClient.java
@@ -96,7 +96,7 @@
         /**
          * Set private members using topology token.
          */
-        public SaslClientCallbackHandler(String topologyToken, byte[] token) {
+        SaslClientCallbackHandler(String topologyToken, byte[] token) {
             this.userName = SaslUtils
                 .encodeIdentifier(topologyToken.getBytes());
             this.userPassword = SaslUtils.encodePassword(token);
diff --git a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyServer.java b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyServer.java
index 6c04d76..45967b9 100644
--- a/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyServer.java
+++ b/storm-client/src/jvm/org/apache/storm/messaging/netty/SaslNettyServer.java
@@ -88,7 +88,7 @@
         private byte[] userPassword;
         private String userName;
 
-        public SaslDigestCallbackHandler(String topologyName, byte[] token) {
+        SaslDigestCallbackHandler(String topologyName, byte[] token) {
             LOG.debug("SaslDigestCallback: Creating SaslDigestCallback handler with topology token: {}", topologyName);
             this.userName = topologyName;
             this.userPassword = token;
diff --git a/storm-client/src/jvm/org/apache/storm/metric/MetricsConsumerBolt.java b/storm-client/src/jvm/org/apache/storm/metric/MetricsConsumerBolt.java
index e76868a..311055f 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/MetricsConsumerBolt.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/MetricsConsumerBolt.java
@@ -107,7 +107,7 @@
         private IMetricsConsumer.TaskInfo taskInfo;
         private Collection<IMetricsConsumer.DataPoint> dataPoints;
 
-        public MetricsTask(IMetricsConsumer.TaskInfo taskInfo, Collection<IMetricsConsumer.DataPoint> dataPoints) {
+        MetricsTask(IMetricsConsumer.TaskInfo taskInfo, Collection<IMetricsConsumer.DataPoint> dataPoints) {
             this.taskInfo = taskInfo;
             this.dataPoints = dataPoints;
         }
diff --git a/storm-client/src/jvm/org/apache/storm/metric/SystemBolt.java b/storm-client/src/jvm/org/apache/storm/metric/SystemBolt.java
index 5e978a7..f2efc2f 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/SystemBolt.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/SystemBolt.java
@@ -106,7 +106,7 @@
     private static class MemoryUsageMetric implements IMetric {
         Supplier<MemoryUsage> getUsage;
 
-        public MemoryUsageMetric(Supplier<MemoryUsage> getUsage) {
+        MemoryUsageMetric(Supplier<MemoryUsage> getUsage) {
             this.getUsage = getUsage;
         }
 
@@ -131,7 +131,7 @@
         Long collectionCount;
         Long collectionTime;
 
-        public GarbageCollectorMetric(GarbageCollectorMXBean gcBean) {
+        GarbageCollectorMetric(GarbageCollectorMXBean gcBean) {
             this.gcBean = gcBean;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/ICombiner.java b/storm-client/src/jvm/org/apache/storm/metric/api/ICombiner.java
index d897ba7..26dcb82 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/ICombiner.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/ICombiner.java
@@ -13,7 +13,7 @@
 package org.apache.storm.metric.api;
 
 public interface ICombiner<T> {
-    public T identity();
+    T identity();
 
-    public T combine(T a, T b);
+    T combine(T a, T b);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/IMetric.java b/storm-client/src/jvm/org/apache/storm/metric/api/IMetric.java
index c6f86f4..b6670fc 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/IMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/IMetric.java
@@ -23,5 +23,5 @@
      *     java.util.Collection)}. If null is returned, nothing will be sent. If this value can be reset, like with a counter, a side effect
      *     of calling this should be that the value is reset.
      */
-    public Object getValueAndReset();
+    Object getValueAndReset();
 }
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/IMetricsConsumer.java b/storm-client/src/jvm/org/apache/storm/metric/api/IMetricsConsumer.java
index a332dc4..1fe21c7 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/IMetricsConsumer.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/IMetricsConsumer.java
@@ -25,7 +25,7 @@
 
     void cleanup();
 
-    public static class TaskInfo {
+    class TaskInfo {
         public String srcWorkerHost;
         public int srcWorkerPort;
         public String srcComponentId;
@@ -56,7 +56,7 @@
     }
 
     // We can't move this outside the interface without breaking backward compatibility.
-    public static class DataPoint {
+    class DataPoint {
         public String name;
         public Object value;
 
diff --git a/storm-client/src/jvm/org/apache/storm/metric/api/rpc/IShellMetric.java b/storm-client/src/jvm/org/apache/storm/metric/api/rpc/IShellMetric.java
index a66e178..910730e 100644
--- a/storm-client/src/jvm/org/apache/storm/metric/api/rpc/IShellMetric.java
+++ b/storm-client/src/jvm/org/apache/storm/metric/api/rpc/IShellMetric.java
@@ -23,5 +23,5 @@
      *     Object can be any JSON-supported type: String, Long, Double, Boolean, Null, List, Map
      */
     @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
-    public void updateMetricFromRPC(Object value);
+    void updateMetricFromRPC(Object value);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/security/auth/IAuthorizer.java b/storm-client/src/jvm/org/apache/storm/security/auth/IAuthorizer.java
index 6662827..fd244c0 100644
--- a/storm-client/src/jvm/org/apache/storm/security/auth/IAuthorizer.java
+++ b/storm-client/src/jvm/org/apache/storm/security/auth/IAuthorizer.java
@@ -41,5 +41,5 @@
      * @param topoConf  configuration of targeted topology
      * @return true if the request is authorized, false if rejected
      */
-    public boolean permit(ReqContext context, String operation, Map<String, Object> topoConf);
+    boolean permit(ReqContext context, String operation, Map<String, Object> topoConf);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/security/auth/IAutoCredentials.java b/storm-client/src/jvm/org/apache/storm/security/auth/IAutoCredentials.java
index 2f8edd4..ac836ca 100644
--- a/storm-client/src/jvm/org/apache/storm/security/auth/IAutoCredentials.java
+++ b/storm-client/src/jvm/org/apache/storm/security/auth/IAutoCredentials.java
@@ -20,14 +20,14 @@
  */
 public interface IAutoCredentials {
 
-    public void prepare(Map<String, Object> conf);
+    void prepare(Map<String, Object> conf);
 
     /**
      * Called to populate the credentials on the client side.
      *
      * @param credentials the credentials to be populated.
      */
-    public void populateCredentials(Map<String, String> credentials);
+    void populateCredentials(Map<String, String> credentials);
 
     /**
      * Called to initially populate the subject on the worker side with credentials passed in.
@@ -35,7 +35,7 @@
      * @param subject     the subject to optionally put credentials in.
      * @param credentials the credentials to be used.
      */
-    public void populateSubject(Subject subject, Map<String, String> credentials);
+    void populateSubject(Subject subject, Map<String, String> credentials);
 
 
     /**
@@ -45,6 +45,6 @@
      * @param subject     the subject to optionally put credentials in.
      * @param credentials the credentials to be used.
      */
-    public void updateSubject(Subject subject, Map<String, String> credentials);
+    void updateSubject(Subject subject, Map<String, String> credentials);
 
 }
diff --git a/storm-client/src/jvm/org/apache/storm/security/auth/IGroupMappingServiceProvider.java b/storm-client/src/jvm/org/apache/storm/security/auth/IGroupMappingServiceProvider.java
index 3c7edda..4bf3b0e 100644
--- a/storm-client/src/jvm/org/apache/storm/security/auth/IGroupMappingServiceProvider.java
+++ b/storm-client/src/jvm/org/apache/storm/security/auth/IGroupMappingServiceProvider.java
@@ -31,6 +31,6 @@
      * @param user User's name
      * @return group memberships of user
      */
-    public Set<String> getGroups(String user) throws IOException;
+    Set<String> getGroups(String user) throws IOException;
 
 }
diff --git a/storm-client/src/jvm/org/apache/storm/security/auth/ITransportPlugin.java b/storm-client/src/jvm/org/apache/storm/security/auth/ITransportPlugin.java
index b1118cb..6bf3b0b 100644
--- a/storm-client/src/jvm/org/apache/storm/security/auth/ITransportPlugin.java
+++ b/storm-client/src/jvm/org/apache/storm/security/auth/ITransportPlugin.java
@@ -39,7 +39,7 @@
      * @param processor service handler
      * @return server
      */
-    public TServer getServer(TProcessor processor) throws IOException, TTransportException;
+    TServer getServer(TProcessor processor) throws IOException, TTransportException;
 
     /**
      * Connect to the specified server via framed transport.
@@ -49,14 +49,14 @@
      * @param asUser     the user as which the connection should be established, and all the subsequent actions should be executed. Only
      *                   applicable when using a secure storm cluster. A null/blank value here indicates that the logged-in user should be used.
      */
-    public TTransport connect(TTransport transport, String serverHost, String asUser) throws IOException, TTransportException;
+    TTransport connect(TTransport transport, String serverHost, String asUser) throws IOException, TTransportException;
 
     /**
      * Get port.
      * @return The port this transport is using. This is not known until
      * {@link #getServer(org.apache.storm.thrift.TProcessor)} has been called
      */
-    public int getPort();
+    int getPort();
 
     /**
      * Check if worker tokens are supported by this transport.
diff --git a/storm-client/src/jvm/org/apache/storm/security/auth/kerberos/KerberosSaslTransportPlugin.java b/storm-client/src/jvm/org/apache/storm/security/auth/kerberos/KerberosSaslTransportPlugin.java
index 2cd43ba..915473b 100644
--- a/storm-client/src/jvm/org/apache/storm/security/auth/kerberos/KerberosSaslTransportPlugin.java
+++ b/storm-client/src/jvm/org/apache/storm/security/auth/kerberos/KerberosSaslTransportPlugin.java
@@ -255,7 +255,7 @@
         private final Subject subject;
         private final TTransportFactory wrapped;
 
-        public TUGIAssumingTransportFactory(TTransportFactory wrapped, Subject subject) {
+        TUGIAssumingTransportFactory(TTransportFactory wrapped, Subject subject) {
             this.wrapped = wrapped;
             this.subject = subject;
 
@@ -293,7 +293,7 @@
     private class LoginCacheKey {
         private String keyString = null;
 
-        public LoginCacheKey(SortedMap<String, ?> authConf) throws IOException {
+        LoginCacheKey(SortedMap<String, ?> authConf) throws IOException {
             if (authConf != null) {
                 StringBuilder stringBuilder = new StringBuilder();
                 for (String configKey : authConf.keySet()) {
diff --git a/storm-client/src/jvm/org/apache/storm/serialization/SerializationRegister.java b/storm-client/src/jvm/org/apache/storm/serialization/SerializationRegister.java
index 06954ce..e714d46 100644
--- a/storm-client/src/jvm/org/apache/storm/serialization/SerializationRegister.java
+++ b/storm-client/src/jvm/org/apache/storm/serialization/SerializationRegister.java
@@ -30,5 +30,5 @@
      *
      * @param kryo what to register the serializers with.
      */
-    public void register(Kryo kryo) throws Exception;
+    void register(Kryo kryo) throws Exception;
 }
diff --git a/storm-client/src/jvm/org/apache/storm/spout/MultiScheme.java b/storm-client/src/jvm/org/apache/storm/spout/MultiScheme.java
index dd50490..0d123b3 100644
--- a/storm-client/src/jvm/org/apache/storm/spout/MultiScheme.java
+++ b/storm-client/src/jvm/org/apache/storm/spout/MultiScheme.java
@@ -18,7 +18,7 @@
 import org.apache.storm.tuple.Fields;
 
 public interface MultiScheme extends Serializable {
-    public Iterable<List<Object>> deserialize(ByteBuffer ser);
+    Iterable<List<Object>> deserialize(ByteBuffer ser);
 
-    public Fields getOutputFields();
+    Fields getOutputFields();
 }
diff --git a/storm-client/src/jvm/org/apache/storm/spout/ShellSpout.java b/storm-client/src/jvm/org/apache/storm/spout/ShellSpout.java
index 5312909..07ab24c 100644
--- a/storm-client/src/jvm/org/apache/storm/spout/ShellSpout.java
+++ b/storm-client/src/jvm/org/apache/storm/spout/ShellSpout.java
@@ -282,7 +282,7 @@
     private class SpoutHeartbeatTimerTask extends TimerTask {
         private ShellSpout spout;
 
-        public SpoutHeartbeatTimerTask(ShellSpout spout) {
+        SpoutHeartbeatTimerTask(ShellSpout spout) {
             this.spout = spout;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/task/IMetricsContext.java b/storm-client/src/jvm/org/apache/storm/task/IMetricsContext.java
index 340cafd..958ce5f 100644
--- a/storm-client/src/jvm/org/apache/storm/task/IMetricsContext.java
+++ b/storm-client/src/jvm/org/apache/storm/task/IMetricsContext.java
@@ -46,13 +46,13 @@
     @Deprecated
     CombinedMetric registerMetric(String name, ICombiner combiner, int timeBucketSizeInSecs);
     
-    public Timer registerTimer(String name);
+    Timer registerTimer(String name);
 
-    public Histogram registerHistogram(String name);
+    Histogram registerHistogram(String name);
 
-    public Meter registerMeter(String name);
+    Meter registerMeter(String name);
 
-    public Counter registerCounter(String name);
+    Counter registerCounter(String name);
 
-    public <T> Gauge<T> registerGauge(String name, Gauge<T> gauge);
+    <T> Gauge<T> registerGauge(String name, Gauge<T> gauge);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/task/ShellBolt.java b/storm-client/src/jvm/org/apache/storm/task/ShellBolt.java
index dfb6383..fca718e 100644
--- a/storm-client/src/jvm/org/apache/storm/task/ShellBolt.java
+++ b/storm-client/src/jvm/org/apache/storm/task/ShellBolt.java
@@ -308,7 +308,7 @@
     private class BoltHeartbeatTimerTask extends TimerTask {
         private ShellBolt bolt;
 
-        public BoltHeartbeatTimerTask(ShellBolt bolt) {
+        BoltHeartbeatTimerTask(ShellBolt bolt) {
             this.bolt = bolt;
         }
 
@@ -363,8 +363,7 @@
                             handleMetrics(shellMsg);
                             break;
                         default:
-                            throw new IllegalArgumentException(String.format("command %s is not supported",
-                                    command));
+                            break;
                     }
                 } catch (InterruptedException e) {
                     // It's likely that Bolt is shutting down so no need to die.
diff --git a/storm-client/src/jvm/org/apache/storm/testing/AckFailDelegate.java b/storm-client/src/jvm/org/apache/storm/testing/AckFailDelegate.java
index b441d4e..19bf202 100644
--- a/storm-client/src/jvm/org/apache/storm/testing/AckFailDelegate.java
+++ b/storm-client/src/jvm/org/apache/storm/testing/AckFailDelegate.java
@@ -15,7 +15,7 @@
 import java.io.Serializable;
 
 public interface AckFailDelegate extends Serializable {
-    public void ack(Object id);
+    void ack(Object id);
 
-    public void fail(Object id);
+    void fail(Object id);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/testing/SpoutTracker.java b/storm-client/src/jvm/org/apache/storm/testing/SpoutTracker.java
index 39ef158..00ab28d 100644
--- a/storm-client/src/jvm/org/apache/storm/testing/SpoutTracker.java
+++ b/storm-client/src/jvm/org/apache/storm/testing/SpoutTracker.java
@@ -75,7 +75,7 @@
         public int emitted = 0;
         public SpoutOutputCollector collector;
 
-        public SpoutTrackOutputCollector(SpoutOutputCollector collector) {
+        SpoutTrackOutputCollector(SpoutOutputCollector collector) {
             this.collector = collector;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/topology/InputDeclarer.java b/storm-client/src/jvm/org/apache/storm/topology/InputDeclarer.java
index 92c7c6a..75030ea 100644
--- a/storm-client/src/jvm/org/apache/storm/topology/InputDeclarer.java
+++ b/storm-client/src/jvm/org/apache/storm/topology/InputDeclarer.java
@@ -22,74 +22,74 @@
     /**
      * The stream is partitioned by the fields specified in the grouping.
      */
-    public T fieldsGrouping(String componentId, Fields fields);
+    T fieldsGrouping(String componentId, Fields fields);
 
     /**
      * The stream is partitioned by the fields specified in the grouping.
      */
-    public T fieldsGrouping(String componentId, String streamId, Fields fields);
+    T fieldsGrouping(String componentId, String streamId, Fields fields);
 
     /**
      * The entire stream goes to a single one of the bolt's tasks. Specifically, it goes to the task with the lowest id.
      */
-    public T globalGrouping(String componentId);
+    T globalGrouping(String componentId);
 
     /**
      * The entire stream goes to a single one of the bolt's tasks. Specifically, it goes to the task with the lowest id.
      */
-    public T globalGrouping(String componentId, String streamId);
+    T globalGrouping(String componentId, String streamId);
 
     /**
      * Tuples are randomly distributed across the bolt's tasks such that each task is guaranteed to get an equal number of tuples.
      */
-    public T shuffleGrouping(String componentId);
+    T shuffleGrouping(String componentId);
 
     /**
      * Tuples are randomly distributed across the bolt's tasks such that each task is guaranteed to get an equal number of tuples.
      */
-    public T shuffleGrouping(String componentId, String streamId);
+    T shuffleGrouping(String componentId, String streamId);
 
     /**
      * If the target bolt has one or more tasks in the same worker process, tuples will be shuffled to just those in-process tasks.
      * Otherwise, this acts like a normal shuffle grouping.
      */
-    public T localOrShuffleGrouping(String componentId);
+    T localOrShuffleGrouping(String componentId);
 
     /**
      * If the target bolt has one or more tasks in the same worker process, tuples will be shuffled to just those in-process tasks.
      * Otherwise, this acts like a normal shuffle grouping.
      */
-    public T localOrShuffleGrouping(String componentId, String streamId);
+    T localOrShuffleGrouping(String componentId, String streamId);
 
     /**
      * This grouping specifies that you don't care how the stream is grouped.
      */
-    public T noneGrouping(String componentId);
+    T noneGrouping(String componentId);
 
     /**
      * This grouping specifies that you don't care how the stream is grouped.
      */
-    public T noneGrouping(String componentId, String streamId);
+    T noneGrouping(String componentId, String streamId);
 
     /**
      * The stream is replicated across all the bolt's tasks. Use this grouping with care.
      */
-    public T allGrouping(String componentId);
+    T allGrouping(String componentId);
 
     /**
      * The stream is replicated across all the bolt's tasks. Use this grouping with care.
      */
-    public T allGrouping(String componentId, String streamId);
+    T allGrouping(String componentId, String streamId);
 
     /**
      * A stream grouped this way means that the producer of the tuple decides which task of the consumer will receive this tuple.
      */
-    public T directGrouping(String componentId);
+    T directGrouping(String componentId);
 
     /**
      * A stream grouped this way means that the producer of the tuple decides which task of the consumer will receive this tuple.
      */
-    public T directGrouping(String componentId, String streamId);
+    T directGrouping(String componentId, String streamId);
 
     /**
      * Tuples are passed to two hashing functions and each target task is decided based on the comparison of the state of candidate nodes.
@@ -97,7 +97,7 @@
      * <p>See https://melmeric.files.wordpress.com/2014/11/the-power-of-both-choices-practical-load-balancing-for-distributed-stream
      * -processing-engines.pdf
      */
-    public T partialKeyGrouping(String componentId, Fields fields);
+    T partialKeyGrouping(String componentId, Fields fields);
 
     /**
      * Tuples are passed to two hashing functions and each target task is decided based on the comparison of the state of candidate nodes.
@@ -105,18 +105,18 @@
      * <p>See https://melmeric.files.wordpress.com/2014/11/the-power-of-both-choices-practical-load-balancing-for-distributed-stream
      * -processing-engines.pdf
      */
-    public T partialKeyGrouping(String componentId, String streamId, Fields fields);
+    T partialKeyGrouping(String componentId, String streamId, Fields fields);
 
     /**
      * A custom stream grouping by implementing the CustomStreamGrouping interface.
      */
-    public T customGrouping(String componentId, CustomStreamGrouping grouping);
+    T customGrouping(String componentId, CustomStreamGrouping grouping);
 
     /**
      * A custom stream grouping by implementing the CustomStreamGrouping interface.
      */
-    public T customGrouping(String componentId, String streamId, CustomStreamGrouping grouping);
+    T customGrouping(String componentId, String streamId, CustomStreamGrouping grouping);
 
-    public T grouping(GlobalStreamId id, Grouping grouping);
+    T grouping(GlobalStreamId id, Grouping grouping);
 
 }
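
For context, these InputDeclarer methods are what TopologyBuilder.setBolt returns for wiring a bolt's inputs; the modifier cleanup does not change any call sites. A usage sketch with hypothetical component ids and bolt class:

    // Sketch: chaining a grouping off setBolt. "counter", "splitter",
    // and WordCountBolt are illustrative names, not from this patch.
    TopologyBuilder builder = new TopologyBuilder();
    builder.setBolt("counter", new WordCountBolt(), 4)
           .partialKeyGrouping("splitter", new Fields("word"));
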
diff --git a/storm-client/src/jvm/org/apache/storm/topology/OutputFieldsDeclarer.java b/storm-client/src/jvm/org/apache/storm/topology/OutputFieldsDeclarer.java
index 453986a..77e51c1 100644
--- a/storm-client/src/jvm/org/apache/storm/topology/OutputFieldsDeclarer.java
+++ b/storm-client/src/jvm/org/apache/storm/topology/OutputFieldsDeclarer.java
@@ -19,11 +19,11 @@
     /**
      * Uses default stream id.
      */
-    public void declare(Fields fields);
+    void declare(Fields fields);
 
-    public void declare(boolean direct, Fields fields);
+    void declare(boolean direct, Fields fields);
 
-    public void declareStream(String streamId, Fields fields);
+    void declareStream(String streamId, Fields fields);
 
-    public void declareStream(String streamId, boolean direct, Fields fields);
+    void declareStream(String streamId, boolean direct, Fields fields);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/topology/PersistentWindowedBoltExecutor.java b/storm-client/src/jvm/org/apache/storm/topology/PersistentWindowedBoltExecutor.java
index f70db21..f055acc 100644
--- a/storm-client/src/jvm/org/apache/storm/topology/PersistentWindowedBoltExecutor.java
+++ b/storm-client/src/jvm/org/apache/storm/topology/PersistentWindowedBoltExecutor.java
@@ -236,7 +236,7 @@
      */
     private static class NoAckOutputCollector extends OutputCollector {
 
-        public NoAckOutputCollector(OutputCollector delegate) {
+        NoAckOutputCollector(OutputCollector delegate) {
             super(delegate);
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/topology/base/BaseTickTupleAwareRichBolt.java b/storm-client/src/jvm/org/apache/storm/topology/base/BaseTickTupleAwareRichBolt.java
index d901654..5e406c3 100644
--- a/storm-client/src/jvm/org/apache/storm/topology/base/BaseTickTupleAwareRichBolt.java
+++ b/storm-client/src/jvm/org/apache/storm/topology/base/BaseTickTupleAwareRichBolt.java
@@ -56,5 +56,5 @@
      *
      * @param tuple The input tuple to be processed.
      */
-    protected abstract void process(final Tuple tuple);
+    protected abstract void process(Tuple tuple);
 }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/fluent/ChainedAggregatorDeclarer.java b/storm-client/src/jvm/org/apache/storm/trident/fluent/ChainedAggregatorDeclarer.java
index 81ec21c..20fe5b3 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/fluent/ChainedAggregatorDeclarer.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/fluent/ChainedAggregatorDeclarer.java
@@ -170,13 +170,13 @@
         stream = stream.each(inputFields, new CombinerAggregatorInitImpl(agg), functionFields);
     }
 
-    private static enum AggType {
+    private enum AggType {
         PARTITION,
         FULL,
         FULL_COMBINE
     }
 
-    public static interface AggregationPartition {
+    public interface AggregationPartition {
         Stream partition(Stream input);
     }
 
@@ -186,7 +186,7 @@
         Aggregator agg;
         Fields outFields;
 
-        public AggSpec(Fields inFields, Aggregator agg, Fields outFields) {
+        AggSpec(Fields inFields, Aggregator agg, Fields outFields) {
             this.inFields = inFields;
             this.agg = agg;
             this.outFields = outFields;
diff --git a/storm-client/src/jvm/org/apache/storm/trident/operation/impl/SingleEmitAggregator.java b/storm-client/src/jvm/org/apache/storm/trident/operation/impl/SingleEmitAggregator.java
index 0e50439..bc579b3 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/operation/impl/SingleEmitAggregator.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/operation/impl/SingleEmitAggregator.java
@@ -71,7 +71,7 @@
         agg.cleanup();
     }
 
-    public static interface BatchToPartition extends Serializable {
+    public interface BatchToPartition extends Serializable {
         int partitionIndex(Object batchId, int numPartitions);
     }
 
@@ -80,7 +80,7 @@
         Object state;
         Object batchId;
 
-        public SingleEmitState(Object batchId) {
+        SingleEmitState(Object batchId) {
             this.batchId = batchId;
         }
     }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/planner/SpoutNode.java b/storm-client/src/jvm/org/apache/storm/trident/planner/SpoutNode.java
index 15f0e2b..59f543c 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/planner/SpoutNode.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/planner/SpoutNode.java
@@ -27,7 +27,7 @@
         this.type = type;
     }
 
-    public static enum SpoutType {
+    public enum SpoutType {
         DRPC,
         BATCH
     }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/spout/ICommitterTridentSpout.java b/storm-client/src/jvm/org/apache/storm/trident/spout/ICommitterTridentSpout.java
index 34e96bd..ec4081a 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/spout/ICommitterTridentSpout.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/spout/ICommitterTridentSpout.java
@@ -18,9 +18,9 @@
 
 public interface ICommitterTridentSpout<X> extends ITridentSpout<X> {
     @Override
-    public Emitter getEmitter(String txStateId, Map<String, Object> conf, TopologyContext context);
+    Emitter getEmitter(String txStateId, Map<String, Object> conf, TopologyContext context);
 
-    public interface Emitter extends ITridentSpout.Emitter {
+    interface Emitter extends ITridentSpout.Emitter {
         void commit(TransactionAttempt attempt);
     }
 }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/spout/OpaquePartitionedTridentSpoutExecutor.java b/storm-client/src/jvm/org/apache/storm/trident/spout/OpaquePartitionedTridentSpoutExecutor.java
index 5fffe1f..a33351d 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/spout/OpaquePartitionedTridentSpoutExecutor.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/spout/OpaquePartitionedTridentSpoutExecutor.java
@@ -63,7 +63,7 @@
         public RotatingTransactionalState rotatingState;
         public ISpoutPartition partition;
 
-        public EmitterPartitionState(RotatingTransactionalState s, ISpoutPartition p) {
+        EmitterPartitionState(RotatingTransactionalState s, ISpoutPartition p) {
             rotatingState = s;
             partition = p;
         }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/spout/PartitionedTridentSpoutExecutor.java b/storm-client/src/jvm/org/apache/storm/trident/spout/PartitionedTridentSpoutExecutor.java
index d0639b9..7f5304e 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/spout/PartitionedTridentSpoutExecutor.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/spout/PartitionedTridentSpoutExecutor.java
@@ -62,7 +62,7 @@
         public RotatingTransactionalState rotatingState;
         public ISpoutPartition partition;
 
-        public EmitterPartitionState(RotatingTransactionalState s, ISpoutPartition p) {
+        EmitterPartitionState(RotatingTransactionalState s, ISpoutPartition p) {
             rotatingState = s;
             partition = p;
         }
@@ -71,7 +71,7 @@
     class Coordinator implements ITridentSpout.BatchCoordinator<Object> {
         private IPartitionedTridentSpout.Coordinator<Object> coordinator;
 
-        public Coordinator(Map<String, Object> conf, TopologyContext context) {
+        Coordinator(Map<String, Object> conf, TopologyContext context) {
             coordinator = spout.getCoordinator(conf, context);
         }
 
@@ -115,7 +115,7 @@
         private int index;
         private int numTasks;
 
-        public Emitter(String txStateId, Map<String, Object> conf, TopologyContext context) {
+        Emitter(String txStateId, Map<String, Object> conf, TopologyContext context) {
             emitter = spout.getEmitter(conf, context);
             state = TransactionalState.newUserState(conf, txStateId);
             index = context.getThisTaskIndex();
diff --git a/storm-client/src/jvm/org/apache/storm/trident/spout/RichSpoutBatchExecutor.java b/storm-client/src/jvm/org/apache/storm/trident/spout/RichSpoutBatchExecutor.java
index 16b86d6..7e87dee 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/spout/RichSpoutBatchExecutor.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/spout/RichSpoutBatchExecutor.java
@@ -129,7 +129,7 @@
         long lastRotate = System.currentTimeMillis();
         long rotateTime;
 
-        public RichSpoutEmitter(Map<String, Object> conf, TopologyContext context) {
+        RichSpoutEmitter(Map<String, Object> conf, TopologyContext context) {
             this.conf = conf;
             this.context = context;
             Number batchSize = (Number) conf.get(MAX_BATCH_SIZE_CONF);
diff --git a/storm-client/src/jvm/org/apache/storm/trident/spout/RichSpoutBatchTriggerer.java b/storm-client/src/jvm/org/apache/storm/trident/spout/RichSpoutBatchTriggerer.java
index aa0bddf..378824d 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/spout/RichSpoutBatchTriggerer.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/spout/RichSpoutBatchTriggerer.java
@@ -134,7 +134,7 @@
 
         SpoutOutputCollector collector;
 
-        public StreamOverrideCollector(SpoutOutputCollector collector) {
+        StreamOverrideCollector(SpoutOutputCollector collector) {
             this.collector = collector;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/trident/spout/TridentSpoutExecutor.java b/storm-client/src/jvm/org/apache/storm/trident/spout/TridentSpoutExecutor.java
index 42223a7..e45ec3e 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/spout/TridentSpoutExecutor.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/spout/TridentSpoutExecutor.java
@@ -110,7 +110,7 @@
         Object id;
         String stream;
 
-        public AddIdCollector(String stream, BatchOutputCollector c) {
+        AddIdCollector(String stream, BatchOutputCollector c) {
             delegate = c;
             this.stream = stream;
         }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/state/ITupleCollection.java b/storm-client/src/jvm/org/apache/storm/trident/state/ITupleCollection.java
index 9b1eba1..61108b8 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/state/ITupleCollection.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/state/ITupleCollection.java
@@ -17,5 +17,5 @@
 
 /* Container of a collection of tuples */
 public interface ITupleCollection {
-    public Iterator<List<Object>> getTuples();
+    Iterator<List<Object>> getTuples();
 }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/state/map/OpaqueMap.java b/storm-client/src/jvm/org/apache/storm/trident/state/map/OpaqueMap.java
index aeda9f4..5775365 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/state/map/OpaqueMap.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/state/map/OpaqueMap.java
@@ -106,7 +106,7 @@
     static class ReplaceUpdater<T> implements ValueUpdater<T> {
         T value;
 
-        public ReplaceUpdater(T t) {
+        ReplaceUpdater(T t) {
             value = t;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/trident/testing/FeederBatchSpout.java b/storm-client/src/jvm/org/apache/storm/trident/testing/FeederBatchSpout.java
index 7a65802..2ae6a43 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/testing/FeederBatchSpout.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/testing/FeederBatchSpout.java
@@ -86,7 +86,7 @@
 
         int index;
 
-        public FeederEmitter(int index) {
+        FeederEmitter(int index) {
             this.index = index;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/trident/testing/FeederCommitterBatchSpout.java b/storm-client/src/jvm/org/apache/storm/trident/testing/FeederCommitterBatchSpout.java
index d65a478..6a71ad5 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/testing/FeederCommitterBatchSpout.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/testing/FeederCommitterBatchSpout.java
@@ -64,7 +64,7 @@
         ITridentSpout.Emitter emitter;
 
 
-        public CommitterEmitter(ITridentSpout.Emitter e) {
+        CommitterEmitter(ITridentSpout.Emitter e) {
             emitter = e;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/trident/testing/LRUMemoryMapState.java b/storm-client/src/jvm/org/apache/storm/trident/testing/LRUMemoryMapState.java
index d1c2ba8..02edbb6 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/testing/LRUMemoryMapState.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/testing/LRUMemoryMapState.java
@@ -111,7 +111,7 @@
         Map<List<Object>, T> db;
         Long currTx;
 
-        public LRUMemoryMapStateBacking(int cacheSize, String id) {
+        LRUMemoryMapStateBacking(int cacheSize, String id) {
             if (!dbs.containsKey(id)) {
                 dbs.put(id, new LRUMap<List<Object>, Object>(cacheSize));
             }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/testing/MemoryMapState.java b/storm-client/src/jvm/org/apache/storm/trident/testing/MemoryMapState.java
index 2647626..64dfc60 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/testing/MemoryMapState.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/testing/MemoryMapState.java
@@ -127,7 +127,7 @@
         Map<List<Object>, T> db;
         Long currTx;
 
-        public MemoryMapStateBacking(String id) {
+        MemoryMapStateBacking(String id) {
             if (!dbs.containsKey(id)) {
                 dbs.put(id, new HashMap());
             }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/topology/MasterBatchCoordinator.java b/storm-client/src/jvm/org/apache/storm/trident/topology/MasterBatchCoordinator.java
index 3c5bc7f..6b595e4 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/topology/MasterBatchCoordinator.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/topology/MasterBatchCoordinator.java
@@ -286,7 +286,7 @@
                 + "}";
     }
 
-    private static enum AttemptStatus {
+    private enum AttemptStatus {
         PROCESSING,
         PROCESSED,
         COMMITTING
@@ -296,7 +296,7 @@
         TransactionAttempt attempt;
         AttemptStatus status;
 
-        public TransactionStatus(TransactionAttempt attempt) {
+        TransactionStatus(TransactionAttempt attempt) {
             this.attempt = attempt;
             this.status = AttemptStatus.PROCESSING;
         }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/topology/TridentBoltExecutor.java b/storm-client/src/jvm/org/apache/storm/trident/topology/TridentBoltExecutor.java
index 7a187c9..11b61fb 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/topology/TridentBoltExecutor.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/topology/TridentBoltExecutor.java
@@ -285,7 +285,7 @@
         }
     }
 
-    static enum TupleType {
+    enum TupleType {
         REGULAR,
         COMMIT,
         COORD
@@ -380,7 +380,7 @@
 
         TrackedBatch currBatch = null;
 
-        public CoordinatedOutputCollector(IOutputCollector delegate) {
+        CoordinatedOutputCollector(IOutputCollector delegate) {
             this.delegate = delegate;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/trident/topology/TridentTopologyBuilder.java b/storm-client/src/jvm/org/apache/storm/trident/topology/TridentTopologyBuilder.java
index f433590..843755e 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/topology/TridentTopologyBuilder.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/topology/TridentTopologyBuilder.java
@@ -289,7 +289,7 @@
         return ret;
     }
 
-    private static interface InputDeclaration {
+    private interface InputDeclaration {
         void declare(InputDeclarer declarer);
 
         String getComponent();
@@ -298,14 +298,14 @@
     }
 
     private static class SpoutComponent {
-        public final Object spout;
-        public final Integer parallelism;
-        public final Map<String, Object> componentConf = new HashMap<>();
+        final Object spout;
+        final Integer parallelism;
+        final Map<String, Object> componentConf = new HashMap<>();
         final String batchGroupId;
         final String streamName;
         final Set<SharedMemory> sharedMemory = new HashSet<>();
 
-        public SpoutComponent(Object spout, String streamName, Integer parallelism, String batchGroupId) {
+        SpoutComponent(Object spout, String streamName, Integer parallelism, String batchGroupId) {
             this.spout = spout;
             this.streamName = streamName;
             this.parallelism = parallelism;
@@ -321,7 +321,7 @@
     private static class TransactionalSpoutComponent extends SpoutComponent {
         public String commitStateId;
 
-        public TransactionalSpoutComponent(Object spout, String streamName, Integer parallelism, String commitStateId,
+        TransactionalSpoutComponent(Object spout, String streamName, Integer parallelism, String commitStateId,
                                            String batchGroupId) {
             super(spout, streamName, parallelism, batchGroupId);
             this.commitStateId = commitStateId;
@@ -341,7 +341,7 @@
         public final Set<String> committerBatches;
         public final Set<SharedMemory> sharedMemory = new HashSet<>();
 
-        public Component(ITridentBatchBolt bolt, Integer parallelism, Set<String> committerBatches) {
+        Component(ITridentBatchBolt bolt, Integer parallelism, Set<String> committerBatches) {
             this.bolt = bolt;
             this.parallelism = parallelism;
             this.committerBatches = committerBatches;
@@ -356,7 +356,7 @@
     private static class SpoutDeclarerImpl extends BaseConfigurationDeclarer<SpoutDeclarer> implements SpoutDeclarer {
         SpoutComponent component;
 
-        public SpoutDeclarerImpl(SpoutComponent component) {
+        SpoutDeclarerImpl(SpoutComponent component) {
             this.component = component;
         }
 
@@ -388,7 +388,7 @@
     private static class BoltDeclarerImpl extends BaseConfigurationDeclarer<BoltDeclarer> implements BoltDeclarer {
         Component component;
 
-        public BoltDeclarerImpl(Component component) {
+        BoltDeclarerImpl(Component component) {
             this.component = component;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/trident/topology/state/RotatingTransactionalState.java b/storm-client/src/jvm/org/apache/storm/trident/topology/state/RotatingTransactionalState.java
index 52b7f05..424ce4a 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/topology/state/RotatingTransactionalState.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/topology/state/RotatingTransactionalState.java
@@ -175,7 +175,7 @@
                 + '}';
     }
 
-    public static interface StateInitializer {
+    public interface StateInitializer {
         Object init(long txid, Object lastState);
     }
 }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/tuple/ComboList.java b/storm-client/src/jvm/org/apache/storm/trident/tuple/ComboList.java
index 3ab06a8..a10dcde 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/tuple/ComboList.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/tuple/ComboList.java
@@ -77,7 +77,7 @@
         int listIndex;
         int subIndex;
 
-        public Pointer(int listIndex, int subIndex) {
+        Pointer(int listIndex, int subIndex) {
             this.listIndex = listIndex;
             this.subIndex = subIndex;
         }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/tuple/TridentTuple.java b/storm-client/src/jvm/org/apache/storm/trident/tuple/TridentTuple.java
index 42df022..d0ec01e 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/tuple/TridentTuple.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/tuple/TridentTuple.java
@@ -19,7 +19,7 @@
 
 public interface TridentTuple extends ITuple, List<Object> {
 
-    public static interface Factory extends Serializable {
+    interface Factory extends Serializable {
         Map<String, ValuePointer> getFieldIndex();
 
         List<String> getOutputFields();
diff --git a/storm-client/src/jvm/org/apache/storm/trident/windowing/AbstractTridentWindowManager.java b/storm-client/src/jvm/org/apache/storm/trident/windowing/AbstractTridentWindowManager.java
index 81451d3..81f218e 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/windowing/AbstractTridentWindowManager.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/windowing/AbstractTridentWindowManager.java
@@ -157,7 +157,7 @@
         final List<List<Object>> values = new ArrayList<>();
         private final BatchOutputCollector delegateCollector;
 
-        public AccumulatedTuplesCollector(BatchOutputCollector delegateCollector) {
+        AccumulatedTuplesCollector(BatchOutputCollector delegateCollector) {
             this.delegateCollector = delegateCollector;
         }
 
@@ -182,7 +182,7 @@
         final int id;
         final List<List<Object>> result;
 
-        public TriggerResult(int id, List<List<Object>> result) {
+        TriggerResult(int id, List<List<Object>> result) {
             this.id = id;
             this.result = result;
         }
diff --git a/storm-client/src/jvm/org/apache/storm/trident/windowing/ITridentWindowManager.java b/storm-client/src/jvm/org/apache/storm/trident/windowing/ITridentWindowManager.java
index cf23afc..167e444 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/windowing/ITridentWindowManager.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/windowing/ITridentWindowManager.java
@@ -26,22 +26,22 @@
     * done before the topology starts accepting tuples. For example, initialize the window manager with any earlier stored tuples/triggers
     * and start the WindowManager.
      */
-    public void prepare();
+    void prepare();
 
     /**
     * This is invoked from {@code org.apache.storm.trident.planner.TridentProcessor}'s cleanup method, so any cleanup operations
     * like clearing caches or closing store connections can be done here.
      */
-    public void shutdown();
+    void shutdown();
 
     /**
      * Add received batch of tuples to cache/store and add them to {@code WindowManager}.
      */
-    public void addTuplesBatch(Object batchId, List<TridentTuple> tuples);
+    void addTuplesBatch(Object batchId, List<TridentTuple> tuples);
 
     /**
      * Returns pending triggers to be emitted.
      */
-    public Queue<StoreBasedTridentWindowManager.TriggerResult> getPendingTriggers();
+    Queue<StoreBasedTridentWindowManager.TriggerResult> getPendingTriggers();
 
 }
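To make the lifecycle contract above concrete, here is a minimal in-memory sketch. It assumes the interface has only the four methods visible in this hunk; the class name and the concurrent collections are illustrative choices, not Storm's StoreBasedTridentWindowManager.

```java
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

import org.apache.storm.trident.tuple.TridentTuple;

// Minimal sketch of the lifecycle contract; a real implementation delegates to a WindowManager.
public class InMemoryTridentWindowManager implements ITridentWindowManager {
    private final Queue<StoreBasedTridentWindowManager.TriggerResult> pendingTriggers =
            new ConcurrentLinkedQueue<>();
    private final Map<Object, List<TridentTuple>> batches = new ConcurrentHashMap<>();

    @Override
    public void prepare() {
        // Called before tuples start flowing: reload any earlier stored tuples/triggers here.
    }

    @Override
    public void shutdown() {
        // Called from TridentProcessor's cleanup: clear caches, close store connections.
        batches.clear();
        pendingTriggers.clear();
    }

    @Override
    public void addTuplesBatch(Object batchId, List<TridentTuple> tuples) {
        // Cache the batch; a real implementation also feeds the tuples to the WindowManager.
        batches.put(batchId, tuples);
    }

    @Override
    public Queue<StoreBasedTridentWindowManager.TriggerResult> getPendingTriggers() {
        // Triggers accumulated by the trigger/eviction policies, drained by the caller.
        return pendingTriggers;
    }
}
```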
diff --git a/storm-client/src/jvm/org/apache/storm/trident/windowing/WindowsStore.java b/storm-client/src/jvm/org/apache/storm/trident/windowing/WindowsStore.java
index 7e1d051..e8e5b3d 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/windowing/WindowsStore.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/windowing/WindowsStore.java
@@ -26,28 +26,28 @@
     /**
     * This can be used as a separator while generating a key from a sequence of strings.
      */
-    public static final String KEY_SEPARATOR = "|";
+    String KEY_SEPARATOR = "|";
 
-    public Object get(String key);
+    Object get(String key);
 
-    public Iterable<Object> get(List<String> keys);
+    Iterable<Object> get(List<String> keys);
 
-    public Iterable<String> getAllKeys();
+    Iterable<String> getAllKeys();
 
-    public void put(String key, Object value);
+    void put(String key, Object value);
 
-    public void putAll(Collection<Entry> entries);
+    void putAll(Collection<Entry> entries);
 
-    public void remove(String key);
+    void remove(String key);
 
-    public void removeAll(Collection<String> keys);
+    void removeAll(Collection<String> keys);
 
-    public void shutdown();
+    void shutdown();
 
     /**
     * This class wraps key and value objects which can be passed to the {@code putAll} method.
      */
-    public static class Entry implements Serializable {
+    class Entry implements Serializable {
         public final String key;
         public final Object value;
 
diff --git a/storm-client/src/jvm/org/apache/storm/trident/windowing/WindowsStoreFactory.java b/storm-client/src/jvm/org/apache/storm/trident/windowing/WindowsStoreFactory.java
index b901d51..46621cd 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/windowing/WindowsStoreFactory.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/windowing/WindowsStoreFactory.java
@@ -28,5 +28,5 @@
      * @param topoConf storm topology configuration passed in {@link org.apache.storm.trident.planner.TridentProcessor#prepare(Map,
      *                 TopologyContext, TridentContext)}
      */
-    public WindowsStore create(Map<String, Object> topoConf);
+    WindowsStore create(Map<String, Object> topoConf);
 }
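For orientation, here is a minimal in-memory WindowsStore plus factory, sketched against just the members visible in these hunks; composite keys would be built with `WindowsStore.KEY_SEPARATOR` (e.g. `"topo-1" + WindowsStore.KEY_SEPARATOR + "trigger-7"`). A production store would be durable rather than a ConcurrentHashMap.

```java
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Sketch only: an ephemeral store, assuming WindowsStore has just the methods shown above.
public class InMemoryWindowsStoreFactory implements WindowsStoreFactory {
    @Override
    public WindowsStore create(Map<String, Object> topoConf) {
        return new WindowsStore() {
            private final ConcurrentHashMap<String, Object> map = new ConcurrentHashMap<>();

            @Override
            public Object get(String key) {
                return map.get(key); // null when absent
            }

            @Override
            public Iterable<Object> get(List<String> keys) {
                List<Object> values = new ArrayList<>(keys.size());
                for (String key : keys) {
                    values.add(map.get(key));
                }
                return values;
            }

            @Override
            public Iterable<String> getAllKeys() {
                return map.keySet();
            }

            @Override
            public void put(String key, Object value) {
                map.put(key, value);
            }

            @Override
            public void putAll(Collection<Entry> entries) {
                for (Entry entry : entries) {
                    map.put(entry.key, entry.value); // Entry exposes public final key/value
                }
            }

            @Override
            public void remove(String key) {
                map.remove(key);
            }

            @Override
            public void removeAll(Collection<String> keys) {
                for (String key : keys) {
                    map.remove(key);
                }
            }

            @Override
            public void shutdown() {
                map.clear();
            }
        };
    }
}
```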
diff --git a/storm-client/src/jvm/org/apache/storm/trident/windowing/config/WindowConfig.java b/storm-client/src/jvm/org/apache/storm/trident/windowing/config/WindowConfig.java
index 7644b8b..49b9d6a 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/windowing/config/WindowConfig.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/windowing/config/WindowConfig.java
@@ -23,21 +23,21 @@
     /**
      * Returns the length of the window.
      */
-    public int getWindowLength();
+    int getWindowLength();
 
     /**
      * Returns the sliding length of the moving window.
      */
-    public int getSlidingLength();
+    int getSlidingLength();
 
     /**
      * Gives the type of windowing. It can be any of {@code Type} values.
      */
-    public <T> WindowStrategy<T> getWindowStrategy();
+    <T> WindowStrategy<T> getWindowStrategy();
 
-    public void validate();
+    void validate();
 
-    public enum Type {
+    enum Type {
         SLIDING_COUNT,
         TUMBLING_COUNT,
         SLIDING_DURATION,
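A hedged sketch of a custom WindowConfig follows, assuming the interface has only the members shown in this hunk and that Storm's concrete strategy classes take the config in their constructor; the validation rule is invented for illustration.

```java
// Hypothetical sliding-count config; not one of Storm's shipped configs.
public class FixedSlidingCountConfig implements WindowConfig {
    private final int windowLength;
    private final int slidingLength;

    FixedSlidingCountConfig(int windowLength, int slidingLength) {
        this.windowLength = windowLength;
        this.slidingLength = slidingLength;
    }

    @Override
    public int getWindowLength() {
        return windowLength; // window size, in tuple counts for a count-based window
    }

    @Override
    public int getSlidingLength() {
        return slidingLength; // how far the window moves per slide
    }

    @Override
    public <T> WindowStrategy<T> getWindowStrategy() {
        // Assumes SlidingCountWindowStrategy accepts the config in its constructor.
        return new SlidingCountWindowStrategy<>(this);
    }

    @Override
    public void validate() {
        // Illustrative rule: both lengths positive, slide no larger than the window.
        if (windowLength <= 0 || slidingLength <= 0 || slidingLength > windowLength) {
            throw new IllegalArgumentException(
                    "lengths must be positive and slidingLength <= windowLength");
        }
    }
}
```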
diff --git a/storm-client/src/jvm/org/apache/storm/trident/windowing/strategy/WindowStrategy.java b/storm-client/src/jvm/org/apache/storm/trident/windowing/strategy/WindowStrategy.java
index ff814f9..860390e 100644
--- a/storm-client/src/jvm/org/apache/storm/trident/windowing/strategy/WindowStrategy.java
+++ b/storm-client/src/jvm/org/apache/storm/trident/windowing/strategy/WindowStrategy.java
@@ -24,10 +24,10 @@
     /**
      * Returns a {@code TriggerPolicy}  by creating with {@code triggerHandler} and {@code evictionPolicy} with the given configuration.
      */
-    public TriggerPolicy<T, ?> getTriggerPolicy(TriggerHandler triggerHandler, EvictionPolicy<T, ?> evictionPolicy);
+    TriggerPolicy<T, ?> getTriggerPolicy(TriggerHandler triggerHandler, EvictionPolicy<T, ?> evictionPolicy);
 
     /**
      * Returns an {@code EvictionPolicy} instance for this strategy with the given configuration.
      */
-    public EvictionPolicy<T, ?> getEvictionPolicy();
+    EvictionPolicy<T, ?> getEvictionPolicy();
 }
diff --git a/storm-client/src/jvm/org/apache/storm/tuple/ITuple.java b/storm-client/src/jvm/org/apache/storm/tuple/ITuple.java
index 0d0a8fb..d176474 100644
--- a/storm-client/src/jvm/org/apache/storm/tuple/ITuple.java
+++ b/storm-client/src/jvm/org/apache/storm/tuple/ITuple.java
@@ -19,36 +19,36 @@
     /**
      * Returns the number of fields in this tuple.
      */
-    public int size();
+    int size();
 
     /**
      * Returns true if this tuple contains the specified name of the field.
      */
-    public boolean contains(String field);
+    boolean contains(String field);
 
     /**
      * Gets the names of the fields in this tuple.
      */
-    public Fields getFields();
+    Fields getFields();
 
     /**
      * Returns the position of the specified field in this tuple.
      *
      * @throws IllegalArgumentException - if field does not exist
      */
-    public int fieldIndex(String field);
+    int fieldIndex(String field);
 
     /**
      * Returns a subset of the tuple based on the fields selector.
      */
-    public List<Object> select(Fields selector);
+    List<Object> select(Fields selector);
 
     /**
      * Gets the field at position i in the tuple. Returns object since tuples are dynamically typed.
      *
      * @throws IndexOutOfBoundsException - if the index is out of range `(index < 0 || index >= size())`
      */
-    public Object getValue(int i);
+    Object getValue(int i);
 
     /**
      * Returns the String at position i in the tuple.
@@ -56,7 +56,7 @@
      * @throws ClassCastException        If that field is not a String
      * @throws IndexOutOfBoundsException - if the index is out of range `(index < 0 || index >= size())`
      */
-    public String getString(int i);
+    String getString(int i);
 
     /**
      * Returns the Integer at position i in the tuple.
@@ -64,7 +64,7 @@
     * @throws ClassCastException        If that field is not an Integer
      * @throws IndexOutOfBoundsException - if the index is out of range `(index < 0 || index >= size())`
      */
-    public Integer getInteger(int i);
+    Integer getInteger(int i);
 
     /**
      * Returns the Long at position i in the tuple.
@@ -72,7 +72,7 @@
      * @throws ClassCastException        If that field is not a Long
      * @throws IndexOutOfBoundsException - if the index is out of range `(index < 0 || index >= size())`
      */
-    public Long getLong(int i);
+    Long getLong(int i);
 
     /**
      * Returns the Boolean at position i in the tuple.
@@ -80,7 +80,7 @@
      * @throws ClassCastException        If that field is not a Boolean
      * @throws IndexOutOfBoundsException - if the index is out of range `(index < 0 || index >= size())`
      */
-    public Boolean getBoolean(int i);
+    Boolean getBoolean(int i);
 
     /**
      * Returns the Short at position i in the tuple.
@@ -88,7 +88,7 @@
      * @throws ClassCastException        If that field is not a Short
      * @throws IndexOutOfBoundsException - if the index is out of range `(index < 0 || index >= size())`
      */
-    public Short getShort(int i);
+    Short getShort(int i);
 
     /**
      * Returns the Byte at position i in the tuple.
@@ -96,7 +96,7 @@
      * @throws ClassCastException        If that field is not a Byte
      * @throws IndexOutOfBoundsException - if the index is out of range `(index < 0 || index >= size())`
      */
-    public Byte getByte(int i);
+    Byte getByte(int i);
 
     /**
      * Returns the Double at position i in the tuple.
@@ -104,7 +104,7 @@
      * @throws ClassCastException        If that field is not a Double
      * @throws IndexOutOfBoundsException - if the index is out of range `(index < 0 || index >= size())`
      */
-    public Double getDouble(int i);
+    Double getDouble(int i);
 
     /**
      * Returns the Float at position i in the tuple.
@@ -112,7 +112,7 @@
      * @throws ClassCastException        If that field is not a Float
      * @throws IndexOutOfBoundsException - if the index is out of range `(index < 0 || index >= size())`
      */
-    public Float getFloat(int i);
+    Float getFloat(int i);
 
     /**
      * Returns the byte array at position i in the tuple.
@@ -120,14 +120,14 @@
      * @throws ClassCastException        If that field is not a byte array
      * @throws IndexOutOfBoundsException - if the index is out of range `(index < 0 || index >= size())`
      */
-    public byte[] getBinary(int i);
+    byte[] getBinary(int i);
 
     /**
      * Gets the field with a specific name. Returns object since tuples are dynamically typed.
      *
      * @throws IllegalArgumentException - if field does not exist
      */
-    public Object getValueByField(String field);
+    Object getValueByField(String field);
 
     /**
      * Gets the String field with a specific name.
@@ -135,7 +135,7 @@
      * @throws ClassCastException       If that field is not a String
      * @throws IllegalArgumentException - if field does not exist
      */
-    public String getStringByField(String field);
+    String getStringByField(String field);
 
     /**
      * Gets the Integer field with a specific name.
@@ -143,7 +143,7 @@
      * @throws ClassCastException       If that field is not an Integer
      * @throws IllegalArgumentException - if field does not exist
      */
-    public Integer getIntegerByField(String field);
+    Integer getIntegerByField(String field);
 
     /**
      * Gets the Long field with a specific name.
@@ -151,7 +151,7 @@
      * @throws ClassCastException       If that field is not a Long
      * @throws IllegalArgumentException - if field does not exist
      */
-    public Long getLongByField(String field);
+    Long getLongByField(String field);
 
     /**
      * Gets the Boolean field with a specific name.
@@ -159,7 +159,7 @@
      * @throws ClassCastException       If that field is not a Boolean
      * @throws IllegalArgumentException - if field does not exist
      */
-    public Boolean getBooleanByField(String field);
+    Boolean getBooleanByField(String field);
 
     /**
      * Gets the Short field with a specific name.
@@ -167,7 +167,7 @@
      * @throws ClassCastException       If that field is not a Short
      * @throws IllegalArgumentException - if field does not exist
      */
-    public Short getShortByField(String field);
+    Short getShortByField(String field);
 
     /**
      * Gets the Byte field with a specific name.
@@ -175,7 +175,7 @@
      * @throws ClassCastException       If that field is not a Byte
      * @throws IllegalArgumentException - if field does not exist
      */
-    public Byte getByteByField(String field);
+    Byte getByteByField(String field);
 
     /**
      * Gets the Double field with a specific name.
@@ -183,7 +183,7 @@
      * @throws ClassCastException       If that field is not a Double
      * @throws IllegalArgumentException - if field does not exist
      */
-    public Double getDoubleByField(String field);
+    Double getDoubleByField(String field);
 
     /**
      * Gets the Float field with a specific name.
@@ -191,7 +191,7 @@
      * @throws ClassCastException       If that field is not a Float
      * @throws IllegalArgumentException - if field does not exist
      */
-    public Float getFloatByField(String field);
+    Float getFloatByField(String field);
 
     /**
      * Gets the Byte array field with a specific name.
@@ -199,11 +199,11 @@
      * @throws ClassCastException       If that field is not a byte array
      * @throws IllegalArgumentException - if field does not exist
      */
-    public byte[] getBinaryByField(String field);
+    byte[] getBinaryByField(String field);
 
     /**
      * Gets all the values in this tuple.
      */
-    public List<Object> getValues();
+    List<Object> getValues();
 
 }
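The accessors above trade compile-time typing for per-call casts. A short illustrative reader; the field names "word" and "count" are invented for the example.

```java
import java.util.List;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.ITuple;

// Illustrative field access through ITuple's typed accessors.
public class TupleAccessExample {
    static void dump(ITuple tuple) {
        if (tuple.contains("word")) {
            String word = tuple.getStringByField("word"); // ClassCastException if not a String
            Long count = tuple.getLongByField("count");   // IllegalArgumentException if "count" is absent
            Object first = tuple.getValue(0);             // positional access, dynamically typed
            List<Object> pair = tuple.select(new Fields("word", "count"));
            System.out.println(word + "=" + count + ", size=" + tuple.size()
                    + ", first=" + first + ", pair=" + pair);
        }
    }
}
```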
diff --git a/storm-client/src/jvm/org/apache/storm/tuple/Tuple.java b/storm-client/src/jvm/org/apache/storm/tuple/Tuple.java
index 01d6c88..4dc51a2 100644
--- a/storm-client/src/jvm/org/apache/storm/tuple/Tuple.java
+++ b/storm-client/src/jvm/org/apache/storm/tuple/Tuple.java
@@ -30,30 +30,30 @@
     /**
      * Returns the global stream id (component + stream) of this tuple.
      */
-    public GlobalStreamId getSourceGlobalStreamId();
+    GlobalStreamId getSourceGlobalStreamId();
 
     /**
      * Gets the id of the component that created this tuple.
      */
-    public String getSourceComponent();
+    String getSourceComponent();
 
     /**
      * Gets the id of the task that created this tuple.
      */
-    public int getSourceTask();
+    int getSourceTask();
 
     /**
      * Gets the id of the stream that this tuple was emitted to.
      */
-    public String getSourceStreamId();
+    String getSourceStreamId();
 
     /**
      * Gets the message id that associated with this tuple.
      */
-    public MessageId getMessageId();
+    MessageId getMessageId();
 
     /**
      * Gets the topology context associated with the tuple.
      */
-    public GeneralTopologyContext getContext();
+    GeneralTopologyContext getContext();
 }
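Unlike ITuple, Tuple also carries provenance, which is what multi-stream bolts branch on. An illustrative logger using only the getters shown above:

```java
import org.apache.storm.generated.GlobalStreamId;
import org.apache.storm.tuple.Tuple;

// Illustrative: inspect where a tuple came from before deciding how to handle it.
public class ProvenanceExample {
    static void logProvenance(Tuple tuple) {
        GlobalStreamId source = tuple.getSourceGlobalStreamId(); // component + stream pair
        System.out.println("tuple from component " + tuple.getSourceComponent()
                + ", stream " + tuple.getSourceStreamId()
                + ", task " + tuple.getSourceTask()
                + ", message id " + tuple.getMessageId()
                + " (global id: " + source + ")");
    }
}
```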
diff --git a/storm-client/src/jvm/org/apache/storm/utils/JCQueue.java b/storm-client/src/jvm/org/apache/storm/utils/JCQueue.java
index 063447a..b0f2d9f 100644
--- a/storm-client/src/jvm/org/apache/storm/utils/JCQueue.java
+++ b/storm-client/src/jvm/org/apache/storm/utils/JCQueue.java
@@ -289,7 +289,7 @@
     private static class DirectInserter implements Inserter {
         private JCQueue queue;
 
-        public DirectInserter(JCQueue queue) {
+        DirectInserter(JCQueue queue) {
             this.queue = queue;
         }
 
@@ -344,7 +344,7 @@
         private JCQueue queue;
         private ArrayList<Object> currentBatch;
 
-        public BatchInserter(JCQueue queue, int batchSz) {
+        BatchInserter(JCQueue queue, int batchSz) {
             this.queue = queue;
             this.batchSz = batchSz;
             this.currentBatch = new ArrayList<>(batchSz + 1);
diff --git a/storm-client/src/jvm/org/apache/storm/utils/ShellUtils.java b/storm-client/src/jvm/org/apache/storm/utils/ShellUtils.java
index 9e141d7..96b3a02 100644
--- a/storm-client/src/jvm/org/apache/storm/utils/ShellUtils.java
+++ b/storm-client/src/jvm/org/apache/storm/utils/ShellUtils.java
@@ -485,7 +485,7 @@
 
         private ShellUtils shell;
 
-        public ShellTimeoutTimerTask(ShellUtils shell) {
+        ShellTimeoutTimerTask(ShellUtils shell) {
             this.shell = shell;
         }
 
diff --git a/storm-client/src/jvm/org/apache/storm/utils/Utils.java b/storm-client/src/jvm/org/apache/storm/utils/Utils.java
index eb31eaf..afb72c4 100644
--- a/storm-client/src/jvm/org/apache/storm/utils/Utils.java
+++ b/storm-client/src/jvm/org/apache/storm/utils/Utils.java
@@ -1800,7 +1800,7 @@
         private Map<String, Object> stormConf;
         private File file;
 
-        public JarConfigReader(Yaml yaml, Map<String, Object> defaultsConf, Map<String, Object> stormConf, File file) {
+        JarConfigReader(Yaml yaml, Map<String, Object> defaultsConf, Map<String, Object> stormConf, File file) {
             this.yaml = yaml;
             this.defaultsConf = defaultsConf;
             this.stormConf = stormConf;
diff --git a/storm-client/src/jvm/org/apache/storm/validation/ConfigValidationUtils.java b/storm-client/src/jvm/org/apache/storm/validation/ConfigValidationUtils.java
index 1510a1b..21d2cde 100644
--- a/storm-client/src/jvm/org/apache/storm/validation/ConfigValidationUtils.java
+++ b/storm-client/src/jvm/org/apache/storm/validation/ConfigValidationUtils.java
@@ -143,7 +143,7 @@
     /**
      * Declares methods for validating configuration values.
      */
-    public static interface FieldValidator {
+    public interface FieldValidator {
         /**
          * Validates the given field.
          *
@@ -151,7 +151,7 @@
          * @param field The field to be validated.
          * @throws IllegalArgumentException if the field fails validation.
          */
-        public void validateField(String name, Object field) throws IllegalArgumentException;
+        void validateField(String name, Object field) throws IllegalArgumentException;
     }
 
     /**
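A minimal sketch of implementing the now-terser FieldValidator; the port-range rule is invented for illustration.

```java
import org.apache.storm.validation.ConfigValidationUtils;

// Sketch: a custom validator for an integer port config value.
public class PortValidatorExample {
    static final ConfigValidationUtils.FieldValidator PORT_VALIDATOR =
            new ConfigValidationUtils.FieldValidator() {
                @Override
                public void validateField(String name, Object field) throws IllegalArgumentException {
                    if (field == null) {
                        return; // treat unset as valid; reject instead if the config is mandatory
                    }
                    if (!(field instanceof Integer) || (Integer) field < 1 || (Integer) field > 65535) {
                        throw new IllegalArgumentException(
                                name + " must be a port in [1, 65535], got: " + field);
                    }
                }
            };
}
```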
diff --git a/storm-client/src/jvm/org/apache/storm/windowing/EvictionPolicy.java b/storm-client/src/jvm/org/apache/storm/windowing/EvictionPolicy.java
index 27bea2a..b95414e 100644
--- a/storm-client/src/jvm/org/apache/storm/windowing/EvictionPolicy.java
+++ b/storm-client/src/jvm/org/apache/storm/windowing/EvictionPolicy.java
@@ -70,7 +70,7 @@
     /**
      * The action to be taken when {@link EvictionPolicy#evict(Event)} is invoked.
      */
-    public enum Action {
+    enum Action {
         /**
          * expire the event and remove it from the queue.
          */
diff --git a/storm-client/src/py/storm/ttypes.py b/storm-client/src/py/storm/ttypes.py
index 4fae416..166e604 100644
--- a/storm-client/src/py/storm/ttypes.py
+++ b/storm-client/src/py/storm/ttypes.py
@@ -2335,11 +2335,12 @@
      - used_cpu
      - fragmented_mem
      - fragmented_cpu
+     - blacklisted
 
     """
 
 
-    def __init__(self, host=None, uptime_secs=None, num_workers=None, num_used_workers=None, supervisor_id=None, version="VERSION_NOT_PROVIDED", total_resources=None, used_mem=None, used_cpu=None, fragmented_mem=None, fragmented_cpu=None,):
+    def __init__(self, host=None, uptime_secs=None, num_workers=None, num_used_workers=None, supervisor_id=None, version="VERSION_NOT_PROVIDED", total_resources=None, used_mem=None, used_cpu=None, fragmented_mem=None, fragmented_cpu=None, blacklisted=None,):
         self.host = host
         self.uptime_secs = uptime_secs
         self.num_workers = num_workers
@@ -2351,6 +2352,7 @@
         self.used_cpu = used_cpu
         self.fragmented_mem = fragmented_mem
         self.fragmented_cpu = fragmented_cpu
+        self.blacklisted = blacklisted
 
     def read(self, iprot):
         if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
@@ -2422,6 +2424,11 @@
                     self.fragmented_cpu = iprot.readDouble()
                 else:
                     iprot.skip(ftype)
+            elif fid == 12:
+                if ftype == TType.BOOL:
+                    self.blacklisted = iprot.readBool()
+                else:
+                    iprot.skip(ftype)
             else:
                 iprot.skip(ftype)
             iprot.readFieldEnd()
@@ -2480,6 +2487,10 @@
             oprot.writeFieldBegin('fragmented_cpu', TType.DOUBLE, 11)
             oprot.writeDouble(self.fragmented_cpu)
             oprot.writeFieldEnd()
+        if self.blacklisted is not None:
+            oprot.writeFieldBegin('blacklisted', TType.BOOL, 12)
+            oprot.writeBool(self.blacklisted)
+            oprot.writeFieldEnd()
         oprot.writeFieldStop()
         oprot.writeStructEnd()
 
@@ -10920,6 +10931,7 @@
     (9, TType.DOUBLE, 'used_cpu', None, None, ),  # 9
     (10, TType.DOUBLE, 'fragmented_mem', None, None, ),  # 10
     (11, TType.DOUBLE, 'fragmented_cpu', None, None, ),  # 11
+    (12, TType.BOOL, 'blacklisted', None, None, ),  # 12
 )
 all_structs.append(NimbusSummary)
 NimbusSummary.thrift_spec = (
diff --git a/storm-client/src/storm.thrift b/storm-client/src/storm.thrift
index e148500..401a69b 100644
--- a/storm-client/src/storm.thrift
+++ b/storm-client/src/storm.thrift
@@ -194,6 +194,7 @@
   9: optional double used_cpu;
   10: optional double fragmented_mem;
   11: optional double fragmented_cpu;
+  12: optional bool blacklisted;
 }
 
 struct NimbusSummary {
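Field 12 is optional, and the generated Python reader above already falls back to iprot.skip for unknown fields, so the SupervisorSummary addition stays wire-compatible with older peers. A hedged Java-side sketch follows; the underscore accessor names are assumed from the style of Storm's generated thrift code (cf. set_wait_secs elsewhere in this PR) and should be verified against the regenerated classes.

```java
import org.apache.storm.generated.SupervisorSummary;

// Assumed accessors: is_set_blacklisted() / is_blacklisted(), per Storm's generated-code style.
public class BlacklistCheck {
    static boolean isBlacklisted(SupervisorSummary summary) {
        // Optional field: guard on presence so summaries from older daemons read as "not blacklisted".
        return summary.is_set_blacklisted() && summary.is_blacklisted();
    }
}
```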
diff --git a/storm-clojure-test/pom.xml b/storm-clojure-test/pom.xml
index c58ceec..a8ea65d 100644
--- a/storm-clojure-test/pom.xml
+++ b/storm-clojure-test/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
 
     <artifactId>storm-clojure-test</artifactId>
diff --git a/storm-clojure/pom.xml b/storm-clojure/pom.xml
index 89d200b..b5e09de 100644
--- a/storm-clojure/pom.xml
+++ b/storm-clojure/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
 
     <artifactId>storm-clojure</artifactId>
@@ -109,9 +109,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/storm-clojure/src/main/java/org/apache/storm/clojure/ClojureTuple.java b/storm-clojure/src/main/java/org/apache/storm/clojure/ClojureTuple.java
index 3b4789c..bc47d7b 100644
--- a/storm-clojure/src/main/java/org/apache/storm/clojure/ClojureTuple.java
+++ b/storm-clojure/src/main/java/org/apache/storm/clojure/ClojureTuple.java
@@ -121,7 +121,7 @@
             this.count = count;
         }
 
-        public Seq(IPersistentMap meta, List<String> fields, List<Object> values, int count) {
+        Seq(IPersistentMap meta, List<String> fields, List<Object> values, int count) {
             super(meta);
             this.fields = fields;
             this.values = values;
diff --git a/storm-core/pom.xml b/storm-core/pom.xml
index 71d220f..1ae902a 100644
--- a/storm-core/pom.xml
+++ b/storm-core/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>..</relativePath>
     </parent>
 
@@ -302,9 +302,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/storm-core/src/jvm/org/apache/storm/command/CLI.java b/storm-core/src/jvm/org/apache/storm/command/CLI.java
index cc5c5dc..d9f3fe1 100644
--- a/storm-core/src/jvm/org/apache/storm/command/CLI.java
+++ b/storm-core/src/jvm/org/apache/storm/command/CLI.java
@@ -221,7 +221,7 @@
         final Assoc assoc;
         final boolean noValue;
 
-        public Opt(String shortName, String longName, Object defaultValue, Parse parse, Assoc assoc, boolean noValue) {
+        Opt(String shortName, String longName, Object defaultValue, Parse parse, Assoc assoc, boolean noValue) {
             this.shortName = shortName;
             this.longName = longName;
             this.defaultValue = defaultValue;
@@ -240,7 +240,7 @@
         final Parse parse;
         final Assoc assoc;
 
-        public Arg(String name, Parse parse, Assoc assoc) {
+        Arg(String name, Parse parse, Assoc assoc) {
             this.name = name;
             this.parse = parse == null ? AS_STRING : parse;
             this.assoc = assoc == null ? INTO_LIST : assoc;
diff --git a/storm-core/src/jvm/org/apache/storm/command/KillTopology.java b/storm-core/src/jvm/org/apache/storm/command/KillTopology.java
index 696c199..e58c08b 100644
--- a/storm-core/src/jvm/org/apache/storm/command/KillTopology.java
+++ b/storm-core/src/jvm/org/apache/storm/command/KillTopology.java
@@ -25,21 +25,47 @@
 
     public static void main(String[] args) throws Exception {
         Map<String, Object> cl = CLI.opt("w", "wait", null, CLI.AS_INT)
+                                    .boolOpt("i", "ignore-errors")
                                     .arg("TOPO", CLI.INTO_LIST)
                                     .parse(args);
+
+        @SuppressWarnings("unchecked")
         final List<String> names = (List<String>) cl.get("TOPO");
+
+        // Wait this many seconds after deactivating the topology before killing it
         Integer wait = (Integer) cl.get("w");
 
+        // If '-i' is set, try to kill every topology listed, even if an error occurs for some of them
+        Boolean continueOnError = (Boolean) cl.get("i");
+
         final KillOptions opts = new KillOptions();
         if (wait != null) {
             opts.set_wait_secs(wait);
         }
+
         NimbusClient.withConfiguredClient(new NimbusClient.WithNimbus() {
             @Override
             public void run(Nimbus.Iface nimbus) throws Exception {
+                int errorCount = 0;
                 for (String name : names) {
-                    nimbus.killTopologyWithOpts(name, opts);
-                    LOG.info("Killed topology: {}", name);
+                    try {
+                        nimbus.killTopologyWithOpts(name, opts);
+                        LOG.info("Killed topology: {}", name);
+                    } catch (Exception e) {
+                        errorCount += 1;
+                        if (!continueOnError) {
+                            throw e;
+                        } else {
+                            LOG.error(
+                                    "Caught error killing topology '{}'; continuing as -i was passed.", name, e
+                            );
+                        }
+                    }
+                }
+
+                // If we failed to kill any topology, still exit with failure status
+                if (errorCount > 0) {
+                    throw new RuntimeException("Failed to successfully kill " + errorCount + " topologies.");
                 }
             }
         });
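With the new -i/--ignore-errors flag, one failed kill no longer aborts the batch: each error is logged and counted, and a single RuntimeException at the end still gives scripts a failure exit. A hedged sketch of an equivalent programmatic call; the topology names are placeholders and a reachable Nimbus is assumed.

```java
// Roughly equivalent to `storm kill -w 30 -i topoA topoB` via the CLI wrapper.
public class KillAllExample {
    public static void main(String[] args) throws Exception {
        // -w 30: wait 30s after deactivation; -i: keep going past individual failures.
        org.apache.storm.command.KillTopology.main(
                new String[]{"-w", "30", "-i", "topoA", "topoB"});
    }
}
```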
diff --git a/storm-core/src/jvm/org/apache/storm/command/SetLogLevel.java b/storm-core/src/jvm/org/apache/storm/command/SetLogLevel.java
index 7e9815a..411c586 100644
--- a/storm-core/src/jvm/org/apache/storm/command/SetLogLevel.java
+++ b/storm-core/src/jvm/org/apache/storm/command/SetLogLevel.java
@@ -74,7 +74,7 @@
 
         private LogLevelAction action;
 
-        public LogLevelsParser(LogLevelAction action) {
+        LogLevelsParser(LogLevelAction action) {
             this.action = action;
         }
 
diff --git a/storm-core/test/clj/org/apache/storm/scheduler/multitenant_scheduler_test.clj b/storm-core/test/clj/org/apache/storm/scheduler/multitenant_scheduler_test.clj
index a9e843c..2808770 100644
--- a/storm-core/test/clj/org/apache/storm/scheduler/multitenant_scheduler_test.clj
+++ b/storm-core/test/clj/org/apache/storm/scheduler/multitenant_scheduler_test.clj
@@ -104,14 +104,14 @@
           ns-count-3 (.getNodeAndSlotCountIfSlotsWereTaken free-pool 3)
           ns-count-4 (.getNodeAndSlotCountIfSlotsWereTaken free-pool 4)
           ns-count-5 (.getNodeAndSlotCountIfSlotsWereTaken free-pool 5)]
-      (is (= 1 (._nodes ns-count-1)))
-      (is (= 4 (._slots ns-count-1)))
-      (is (= 1 (._nodes ns-count-3)))
-      (is (= 4 (._slots ns-count-3)))
-      (is (= 1 (._nodes ns-count-4)))
-      (is (= 4 (._slots ns-count-4)))
-      (is (= 2 (._nodes ns-count-5)))
-      (is (= 8 (._slots ns-count-5)))
+      (is (= 1 (.nodes ns-count-1)))
+      (is (= 4 (.slots ns-count-1)))
+      (is (= 1 (.nodes ns-count-3)))
+      (is (= 4 (.slots ns-count-3)))
+      (is (= 1 (.nodes ns-count-4)))
+      (is (= 4 (.slots ns-count-4)))
+      (is (= 2 (.nodes ns-count-5)))
+      (is (= 8 (.slots ns-count-5)))
     )
     (let [nodes (.takeNodesBySlots free-pool 5)]
       (is (= 2 (.size nodes)))
@@ -347,14 +347,14 @@
           ns-count-3 (.getNodeAndSlotCountIfSlotsWereTaken default-pool 3)
           ns-count-4 (.getNodeAndSlotCountIfSlotsWereTaken default-pool 4)
           ns-count-5 (.getNodeAndSlotCountIfSlotsWereTaken default-pool 5)]
-      (is (= 1 (._nodes ns-count-1)))
-      (is (= 4 (._slots ns-count-1)))
-      (is (= 1 (._nodes ns-count-3)))
-      (is (= 4 (._slots ns-count-3)))
-      (is (= 1 (._nodes ns-count-4)))
-      (is (= 4 (._slots ns-count-4)))
-      (is (= 2 (._nodes ns-count-5)))
-      (is (= 8 (._slots ns-count-5)))
+      (is (= 1 (.nodes ns-count-1)))
+      (is (= 4 (.slots ns-count-1)))
+      (is (= 1 (.nodes ns-count-3)))
+      (is (= 4 (.slots ns-count-3)))
+      (is (= 1 (.nodes ns-count-4)))
+      (is (= 4 (.slots ns-count-4)))
+      (is (= 2 (.nodes ns-count-5)))
+      (is (= 8 (.slots ns-count-5)))
     )
     (let [nodes (.takeNodesBySlots default-pool 3)]
       (is (= 1 (.size nodes)))
@@ -532,14 +532,14 @@
           ns-count-3 (.getNodeAndSlotCountIfSlotsWereTaken isolated-pool 3)
           ns-count-4 (.getNodeAndSlotCountIfSlotsWereTaken isolated-pool 4)
           ns-count-5 (.getNodeAndSlotCountIfSlotsWereTaken isolated-pool 5)]
-      (is (= 1 (._nodes ns-count-1)))
-      (is (= 4 (._slots ns-count-1)))
-      (is (= 1 (._nodes ns-count-3)))
-      (is (= 4 (._slots ns-count-3)))
-      (is (= 1 (._nodes ns-count-4)))
-      (is (= 4 (._slots ns-count-4)))
-      (is (= 1 (._nodes ns-count-5))) ;;Only 1 node can be stolen right now
-      (is (= 4 (._slots ns-count-5)))
+      (is (= 1 (.nodes ns-count-1)))
+      (is (= 4 (.slots ns-count-1)))
+      (is (= 1 (.nodes ns-count-3)))
+      (is (= 4 (.slots ns-count-3)))
+      (is (= 1 (.nodes ns-count-4)))
+      (is (= 4 (.slots ns-count-4)))
+      (is (= 1 (.nodes ns-count-5))) ;;Only 1 node can be stolen right now
+      (is (= 4 (.slots ns-count-5)))
     )
     (let [nodes (.takeNodesBySlots isolated-pool 3)]
       (is (= 1 (.size nodes)))
diff --git a/storm-core/test/jvm/org/apache/storm/nimbus/InMemoryTopologyActionNotifier.java b/storm-core/test/jvm/org/apache/storm/nimbus/InMemoryTopologyActionNotifier.java
index a7102c4..2328c75 100644
--- a/storm-core/test/jvm/org/apache/storm/nimbus/InMemoryTopologyActionNotifier.java
+++ b/storm-core/test/jvm/org/apache/storm/nimbus/InMemoryTopologyActionNotifier.java
@@ -25,7 +25,7 @@
 
 
     @Override
-    public void prepare(Map<String, Object> StormConf) {
+    public void prepare(Map<String, Object> stormConf) {
         //no-op
     }
 
diff --git a/storm-dist/binary/final-package/pom.xml b/storm-dist/binary/final-package/pom.xml
index f006202..934a1c9 100644
--- a/storm-dist/binary/final-package/pom.xml
+++ b/storm-dist/binary/final-package/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>apache-storm-bin</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/storm-dist/binary/pom.xml b/storm-dist/binary/pom.xml
index c4d429e..891edd2 100644
--- a/storm-dist/binary/pom.xml
+++ b/storm-dist/binary/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
@@ -38,6 +38,8 @@
     <properties>
         <packageTimestamp>${maven.build.timestamp}</packageTimestamp>
         <maven.build.timestamp.format>YYYYMMddHHmm</maven.build.timestamp.format>
+        <license.thirdPartyFilename>THIRD-PARTY.txt</license.thirdPartyFilename>
+        <license.outputDirectory>${project.build.directory}/generated-sources/license</license.outputDirectory>
     </properties>
 
     <modules>
diff --git a/storm-dist/binary/storm-autocreds-bin/pom.xml b/storm-dist/binary/storm-autocreds-bin/pom.xml
index f9e3bec..ca91b53 100644
--- a/storm-dist/binary/storm-autocreds-bin/pom.xml
+++ b/storm-dist/binary/storm-autocreds-bin/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <groupId>org.apache.storm</groupId>
         <artifactId>apache-storm-bin</artifactId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <artifactId>storm-autocreds-bin</artifactId>
     <packaging>pom</packaging>
diff --git a/storm-dist/binary/storm-client-bin/pom.xml b/storm-dist/binary/storm-client-bin/pom.xml
index 1eb118e..58e8379 100644
--- a/storm-dist/binary/storm-client-bin/pom.xml
+++ b/storm-dist/binary/storm-client-bin/pom.xml
@@ -22,7 +22,7 @@
     <parent>
         <artifactId>apache-storm-bin</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/storm-dist/binary/storm-kafka-monitor-bin/pom.xml b/storm-dist/binary/storm-kafka-monitor-bin/pom.xml
index 1855762..6efcc04 100644
--- a/storm-dist/binary/storm-kafka-monitor-bin/pom.xml
+++ b/storm-dist/binary/storm-kafka-monitor-bin/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <groupId>org.apache.storm</groupId>
         <artifactId>apache-storm-bin</artifactId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <artifactId>storm-kafka-monitor-bin</artifactId>
     <packaging>pom</packaging>
diff --git a/storm-dist/binary/storm-sql-core-bin/pom.xml b/storm-dist/binary/storm-sql-core-bin/pom.xml
index 3236a01..bad85b9 100644
--- a/storm-dist/binary/storm-sql-core-bin/pom.xml
+++ b/storm-dist/binary/storm-sql-core-bin/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <groupId>org.apache.storm</groupId>
         <artifactId>apache-storm-bin</artifactId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <artifactId>storm-sql-core-bin</artifactId>
     <packaging>pom</packaging>
diff --git a/storm-dist/binary/storm-sql-runtime-bin/pom.xml b/storm-dist/binary/storm-sql-runtime-bin/pom.xml
index f148f67..da663a6 100644
--- a/storm-dist/binary/storm-sql-runtime-bin/pom.xml
+++ b/storm-dist/binary/storm-sql-runtime-bin/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <groupId>org.apache.storm</groupId>
         <artifactId>apache-storm-bin</artifactId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <artifactId>storm-sql-runtime-bin</artifactId>
     <packaging>pom</packaging>
diff --git a/storm-dist/binary/storm-submit-tools-bin/pom.xml b/storm-dist/binary/storm-submit-tools-bin/pom.xml
index 794507a..491291c 100644
--- a/storm-dist/binary/storm-submit-tools-bin/pom.xml
+++ b/storm-dist/binary/storm-submit-tools-bin/pom.xml
@@ -20,7 +20,7 @@
     <parent>
         <groupId>org.apache.storm</groupId>
         <artifactId>apache-storm-bin</artifactId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
     </parent>
     <artifactId>storm-submit-tools-bin</artifactId>
     <packaging>pom</packaging>
diff --git a/storm-dist/binary/storm-webapp-bin/pom.xml b/storm-dist/binary/storm-webapp-bin/pom.xml
index 6918ca6..a9606f7 100644
--- a/storm-dist/binary/storm-webapp-bin/pom.xml
+++ b/storm-dist/binary/storm-webapp-bin/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>apache-storm-bin</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
 
diff --git a/storm-dist/source/pom.xml b/storm-dist/source/pom.xml
index ce6c494..f758914 100644
--- a/storm-dist/source/pom.xml
+++ b/storm-dist/source/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
diff --git a/storm-multilang/javascript/pom.xml b/storm-multilang/javascript/pom.xml
index 005b04a..aac4269 100644
--- a/storm-multilang/javascript/pom.xml
+++ b/storm-multilang/javascript/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
diff --git a/storm-multilang/python/pom.xml b/storm-multilang/python/pom.xml
index 99f5b65..a7aeb1e 100644
--- a/storm-multilang/python/pom.xml
+++ b/storm-multilang/python/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
diff --git a/storm-multilang/ruby/pom.xml b/storm-multilang/ruby/pom.xml
index c2589b6..afe61d0 100644
--- a/storm-multilang/ruby/pom.xml
+++ b/storm-multilang/ruby/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../../pom.xml</relativePath>
     </parent>
 
diff --git a/storm-server/pom.xml b/storm-server/pom.xml
index 2a93616..06a641b 100644
--- a/storm-server/pom.xml
+++ b/storm-server/pom.xml
@@ -21,7 +21,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>..</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
@@ -125,6 +125,11 @@
             <groupId>org.apache.httpcomponents</groupId>
             <artifactId>httpclient</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.junit.jupiter</groupId>
+            <artifactId>junit-jupiter-params</artifactId>
+            <scope>test</scope>
+        </dependency>
     </dependencies>
 
     <build>
@@ -180,9 +185,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>763</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/storm-server/src/main/java/org/apache/storm/DaemonConfig.java b/storm-server/src/main/java/org/apache/storm/DaemonConfig.java
index 26cacff..def49ae 100644
--- a/storm-server/src/main/java/org/apache/storm/DaemonConfig.java
+++ b/storm-server/src/main/java/org/apache/storm/DaemonConfig.java
@@ -18,6 +18,21 @@
 
 package org.apache.storm;
 
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsBoolean;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsImplementationOfClass;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsInteger;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsListEntryCustom;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsMapEntryCustom;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsMapEntryType;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsNoDuplicateInList;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsNumber;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsPositiveNumber;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsString;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsStringList;
+import static org.apache.storm.validation.ConfigValidationAnnotations.IsStringOrStringList;
+import static org.apache.storm.validation.ConfigValidationAnnotations.NotNull;
+import static org.apache.storm.validation.ConfigValidationAnnotations.Password;
+
 import java.util.ArrayList;
 import java.util.Map;
 import org.apache.storm.container.ResourceIsolationInterface;
@@ -31,27 +46,12 @@
 import org.apache.storm.validation.ConfigValidation;
 import org.apache.storm.validation.Validated;
 
-import static org.apache.storm.validation.ConfigValidationAnnotations.NotNull;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsBoolean;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsImplementationOfClass;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsInteger;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsListEntryCustom;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsMapEntryCustom;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsMapEntryType;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsNoDuplicateInList;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsNumber;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsPositiveNumber;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsString;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsStringList;
-import static org.apache.storm.validation.ConfigValidationAnnotations.IsStringOrStringList;
-import static org.apache.storm.validation.ConfigValidationAnnotations.Password;
-
 /**
  * Storm configs are specified as a plain old map. This class provides constants for all the configurations possible on a Storm cluster.
  * Each constant is paired with an annotation that defines the validity criterion of the corresponding field. Default values for these
  * configs can be found in defaults.yaml.
  *
- * This class extends {@link org.apache.storm.Config} for supporting Storm Daemons.
+ * <p>This class extends {@link org.apache.storm.Config} for supporting Storm Daemons.
  */
 public class DaemonConfig implements Validated {
 
@@ -90,7 +90,7 @@
     /**
      * A global task scheduler used to assign topologies's tasks to supervisors' workers.
      *
-     * If this is not set, a default system scheduler will be used.
+     * <p>If this is not set, a default system scheduler will be used.
      */
     @IsString
     public static final String STORM_SCHEDULER = "storm.scheduler";
@@ -138,6 +138,18 @@
     public static final String BLACKLIST_SCHEDULER_STRATEGY = "blacklist.scheduler.strategy";
 
     /**
+     * Whether {@link org.apache.storm.scheduler.blacklist.BlacklistScheduler} will assume a supervisor is bad
+     * based on bad slots.
+     * A bad slot means nimbus did not receive a heartbeat from the worker in time; it is hard to tell whether
+     * the supervisor node or the worker itself is at fault.
+     * If this is set to true, the scheduler will consider a supervisor bad whenever it sees bad slots on it.
+     * Otherwise, it will consider a supervisor bad only when it does not receive the supervisor heartbeat in time.
+     */
+    @IsBoolean
+    public static final String BLACKLIST_SCHEDULER_ASSUME_SUPERVISOR_BAD_BASED_ON_BAD_SLOT
+            = "blacklist.scheduler.assume.supervisor.bad.based.on.bad.slot";
+
+    /**
      * Whether we want to display all the resource capacity and scheduled usage on the UI page. You MUST have this variable set if you are
      * using any kind of resource-related scheduler.
      * <p/>
@@ -200,9 +212,9 @@
     /**
      * The length of time a jar file lives in the inbox before being deleted by the cleanup thread.
      *
-     * Probably keep this value greater than or equal to NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS. Note that the time it takes to delete an
-     * inbox jar file is going to be somewhat more than NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS (depending on how often
-     * NIMBUS_CLEANUP_FREQ_SECS is set to).
+     * <p>Probably keep this value greater than or equal to NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS. Note that the time
+     * it takes to delete an inbox jar file is going to be somewhat more than NIMBUS_CLEANUP_INBOX_JAR_EXPIRATION_SECS
+     * (depending on what NIMBUS_CLEANUP_FREQ_SECS is set to).
      *
      * @see #NIMBUS_CLEANUP_INBOX_FREQ_SECS
      */
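A hedged sketch of setting the new blacklist knob programmatically for a test daemon config; in a real deployment it would normally go in storm.yaml under blacklist.scheduler.assume.supervisor.bad.based.on.bad.slot.

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.storm.DaemonConfig;

// Sketch: only blame supervisors for missed supervisor heartbeats, not for bad worker slots.
public class BlacklistConfExample {
    static Map<String, Object> daemonConf() {
        Map<String, Object> conf = new HashMap<>();
        conf.put(DaemonConfig.BLACKLIST_SCHEDULER_ASSUME_SUPERVISOR_BAD_BASED_ON_BAD_SLOT, false);
        return conf;
    }
}
```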
diff --git a/storm-server/src/main/java/org/apache/storm/ILocalClusterTrackedTopologyAware.java b/storm-server/src/main/java/org/apache/storm/ILocalClusterTrackedTopologyAware.java
index 4927da4..5e94a09 100644
--- a/storm-server/src/main/java/org/apache/storm/ILocalClusterTrackedTopologyAware.java
+++ b/storm-server/src/main/java/org/apache/storm/ILocalClusterTrackedTopologyAware.java
@@ -28,8 +28,9 @@
  * Topology.
  */
 public interface ILocalClusterTrackedTopologyAware extends ILocalCluster {
+
     /**
-     * Submit a tracked topology to be run in local mode
+     * Submit a tracked topology to be run in local mode.
      *
      * @param topologyName the name of the topology to use
      * @param conf         the config for the topology
@@ -41,7 +42,7 @@
     ILocalTopology submitTopology(String topologyName, Map<String, Object> conf, TrackedTopology topology) throws TException;
 
     /**
-     * Submit a tracked topology to be run in local mode
+     * Submit a tracked topology to be run in local mode.
      *
      * @param topologyName the name of the topology to use
      * @param conf         the config for the topology
diff --git a/storm-server/src/main/java/org/apache/storm/LocalCluster.java b/storm-server/src/main/java/org/apache/storm/LocalCluster.java
index eb7a746..636fff5 100644
--- a/storm-server/src/main/java/org/apache/storm/LocalCluster.java
+++ b/storm-server/src/main/java/org/apache/storm/LocalCluster.java
@@ -334,6 +334,7 @@
      *
      * @throws Exception on any Exception.
      */
+    @SuppressWarnings("checkstyle:VariableDeclarationUsageDistance")
     public static <T> T withLocalModeOverride(Callable<T> c, long ttlSec, Map<String, Object> daemonConf) throws Exception {
         LOG.info("\n\n\t\tSTARTING LOCAL MODE CLUSTER\n\n");
         Builder builder = new Builder();
@@ -422,6 +423,7 @@
     }
 
     /**
+     * Reference to nimbus.
      * @return Nimbus itself so you can interact with it directly, if needed.
      */
     public Nimbus getNimbus() {
@@ -429,6 +431,7 @@
     }
 
     /**
+     * Reference to metrics registry.
      * @return The metrics registry for the local cluster.
      */
     public StormMetricsRegistry getMetricRegistry() {
@@ -436,7 +439,8 @@
     }
 
     /**
-     * @return the base config for the daemons.
+     * Get daemon configuration.
+     * @return the base config for the daemons
      */
     public Map<String, Object> getDaemonConf() {
         return new HashMap<>(daemonConf);
@@ -463,6 +467,24 @@
     }
 
     @Override
+    public LocalTopology submitTopology(String topologyName, Map<String, Object> conf, TrackedTopology topology)
+            throws TException {
+        return submitTopology(topologyName, conf, topology.getTopology());
+    }
+
+    @Override
+    public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology)
+            throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException {
+        try {
+            @SuppressWarnings("unchecked")
+            Map<String, Object> conf = (Map<String, Object>) JSONValue.parseWithException(jsonConf);
+            submitTopology(name, conf, topology);
+        } catch (ParseException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
     public LocalTopology submitTopologyWithOpts(String topologyName, Map<String, Object> conf, StormTopology topology,
                                                 SubmitOptions submitOpts)
         throws TException {
@@ -474,12 +496,6 @@
     }
 
     @Override
-    public LocalTopology submitTopology(String topologyName, Map<String, Object> conf, TrackedTopology topology)
-        throws TException {
-        return submitTopology(topologyName, conf, topology.getTopology());
-    }
-
-    @Override
     public LocalTopology submitTopologyWithOpts(String topologyName, Map<String, Object> conf, TrackedTopology topology,
                                                 SubmitOptions submitOpts)
         throws TException {
@@ -487,6 +503,19 @@
     }
 
     @Override
+    public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology,
+            SubmitOptions options)
+            throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException {
+        try {
+            @SuppressWarnings("unchecked")
+            Map<String, Object> conf = (Map<String, Object>) JSONValue.parseWithException(jsonConf);
+            submitTopologyWithOpts(name, conf, topology, options);
+        } catch (ParseException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    @Override
     public void uploadNewCredentials(String topologyName, Credentials creds) throws TException {
         getNimbus().uploadNewCredentials(topologyName, creds);
     }
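
 For context, a hedged usage sketch of the JSON-conf overload relocated above; the topology wiring is elided, and passing null for uploadedJarLocation in local mode is an assumption:

 ```java
 import org.apache.storm.LocalCluster;
 import org.apache.storm.topology.TopologyBuilder;

 public class SubmitWithJsonConfSketch {
     public static void main(String[] args) throws Exception {
         try (LocalCluster cluster = new LocalCluster()) {
             TopologyBuilder builder = new TopologyBuilder();
             // ... builder.setSpout(...)/setBolt(...) elided ...
             String jsonConf = "{\"topology.workers\": 1}"; // parsed via JSONValue.parseWithException
             cluster.submitTopology("demo-topo", null, jsonConf, builder.createTopology());
         }
     }
 }
 ```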
@@ -792,34 +821,9 @@
         return trackId;
     }
 
-    @Override
-    public void submitTopology(String name, String uploadedJarLocation, String jsonConf, StormTopology topology)
-        throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException {
-        try {
-            @SuppressWarnings("unchecked")
-            Map<String, Object> conf = (Map<String, Object>) JSONValue.parseWithException(jsonConf);
-            submitTopology(name, conf, topology);
-        } catch (ParseException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
     //Nimbus Compatibility
 
     @Override
-    public void submitTopologyWithOpts(String name, String uploadedJarLocation, String jsonConf, StormTopology topology,
-                                       SubmitOptions options)
-        throws AlreadyAliveException, InvalidTopologyException, AuthorizationException, TException {
-        try {
-            @SuppressWarnings("unchecked")
-            Map<String, Object> conf = (Map<String, Object>) JSONValue.parseWithException(jsonConf);
-            submitTopologyWithOpts(name, conf, topology, options);
-        } catch (ParseException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
     public void setLogConfig(String name, LogConfig config) throws TException {
         // TODO Auto-generated method stub
         throw new RuntimeException("NOT IMPLEMENTED YET");
@@ -1239,7 +1243,7 @@
 
         private final String id;
 
-        public TrackedStormCommon(String id) {
+        TrackedStormCommon(String id) {
             this.id = id;
         }
 
diff --git a/storm-server/src/main/java/org/apache/storm/LocalDRPC.java b/storm-server/src/main/java/org/apache/storm/LocalDRPC.java
index ba243cc..b11b2db 100644
--- a/storm-server/src/main/java/org/apache/storm/LocalDRPC.java
+++ b/storm-server/src/main/java/org/apache/storm/LocalDRPC.java
@@ -30,10 +30,11 @@
 import org.apache.storm.utils.ServiceRegistry;
 
 /**
- * A Local way to test DRPC
+ * A Local way to test DRPC.
  *
- * try (LocalDRPC drpc = new LocalDRPC()) { // Do tests }
+ * <p><code>try (LocalDRPC drpc = new LocalDRPC()) { // Do tests }</code>
  */
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public class LocalDRPC implements ILocalDRPC {
 
     private final DRPC drpc;
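
 Expanding the javadoc hint into a compilable sketch; the no-arg constructor usage follows the javadoc above, and getServiceId() is assumed from ILocalDRPC:

 ```java
 import org.apache.storm.LocalDRPC;

 public class LocalDrpcSketch {
     public static void main(String[] args) throws Exception {
         try (LocalDRPC drpc = new LocalDRPC()) {
             // The DRPC service is registered while the block is open and is
             // shut down automatically when close() runs at the end of the block.
             System.out.println("service id: " + drpc.getServiceId());
         }
     }
 }
 ```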
diff --git a/storm-server/src/main/java/org/apache/storm/ProcessSimulator.java b/storm-server/src/main/java/org/apache/storm/ProcessSimulator.java
index f2e81cf..e332d6e 100644
--- a/storm-server/src/main/java/org/apache/storm/ProcessSimulator.java
+++ b/storm-server/src/main/java/org/apache/storm/ProcessSimulator.java
@@ -31,28 +31,21 @@
     private static Object lock = new Object();
 
     /**
-     * Register a process' handle
-     *
-     * @param pid
-     * @param shutdownable
+     * Register a process' handle.
      */
     public static void registerProcess(String pid, Shutdownable shutdownable) {
         processMap.put(pid, shutdownable);
     }
 
     /**
-     * Get all process handles
-     *
-     * @return
+     * Get all process handles.
      */
     public static Collection<Shutdownable> getAllProcessHandles() {
         return processMap.values();
     }
 
     /**
-     * Kill a process
-     *
-     * @param pid
+     * Kill a process.
      */
     public static void killProcess(String pid) {
         synchronized (lock) {
@@ -67,7 +60,7 @@
     }
 
     /**
-     * Kill all processes
+     * Kill all processes.
      */
     public static void killAllProcesses() {
         Set<String> pids = processMap.keySet();
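
 A hedged sketch of the registry round-trip documented above; the pid is arbitrary, and Shutdownable is assumed to be a single-method interface so a lambda suffices:

 ```java
 import org.apache.storm.ProcessSimulator;
 import org.apache.storm.daemon.Shutdownable;

 public class ProcessSimulatorSketch {
     public static void main(String[] args) {
         String pid = "worker-1"; // arbitrary handle id
         Shutdownable proc = () -> System.out.println("shutting down worker-1");
         ProcessSimulator.registerProcess(pid, proc);
         // killProcess removes the handle and calls shutdown() under the class lock:
         ProcessSimulator.killProcess(pid);
     }
 }
 ```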
diff --git a/storm-server/src/main/java/org/apache/storm/Testing.java b/storm-server/src/main/java/org/apache/storm/Testing.java
index 8f46a33..45e585c 100644
--- a/storm-server/src/main/java/org/apache/storm/Testing.java
+++ b/storm-server/src/main/java/org/apache/storm/Testing.java
@@ -79,7 +79,7 @@
 
     /**
      * Continue to execute body repeatedly until condition is true or TEST_TIMEOUT_MS has
-     * passed
+     * passed.
      * @param condition what we are waiting for
      * @param body what to run in the loop
      * @throws AssertionError if the loop timed out.
@@ -90,7 +90,7 @@
 
     /**
      * Continue to execute body repeatedly until condition is true or TEST_TIMEOUT_MS has
-     * passed
+     * passed.
      * @param timeoutMs the number of ms to wait before timing out.
      * @param condition what we are waiting for
      * @param body what to run in the loop
@@ -113,19 +113,20 @@
     }
 
     /**
-     * Convenience method for data.stream.allMatch(pred)
+     * Convenience method for data.stream.allMatch(pred).
      */
     public static <T> boolean isEvery(Collection<T> data, Predicate<T> pred) {
         return data.stream().allMatch(pred);
     }
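
 Combining the two helpers, a hedged sketch; the flags start true so the condition is satisfied immediately and the loop exits without waiting:

 ```java
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.storm.Testing;

 public class WhileTimeoutSketch {
     public static void main(String[] args) {
         List<AtomicBoolean> flags = Arrays.asList(new AtomicBoolean(true), new AtomicBoolean(true));
         Testing.whileTimeout(
             () -> !Testing.isEvery(flags, AtomicBoolean::get), // keep looping while any flag is false
             () -> Thread.yield());                             // body run on each iteration
     }
 }
 ```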
 
     /**
-     * Run with simulated time
+     * Run with simulated time.
+     *
      * @deprecated use ```
-     * try (Time.SimulatedTime time = new Time.SimulatedTime()) {
-     *  ...
-     * }
-     * ```
+     *     try (Time.SimulatedTime time = new Time.SimulatedTime()) {
+     *      ...
+     *     }
+     *     ```
      * @param code what to run
      */
     @Deprecated
@@ -157,22 +158,23 @@
             conf = new HashMap<>();
         }
         return new LocalCluster.Builder()
-            .withSupervisors(supervisors)
-            .withPortsPerSupervisor(ports)
-            .withDaemonConf(conf)
-            .withNimbusDaemon(param.isNimbusDaemon())
-            .withTracked(id)
-            .withSimulatedTime(simulated)
-            .build();
+                .withSupervisors(supervisors)
+                .withPortsPerSupervisor(ports)
+                .withDaemonConf(conf)
+                .withNimbusDaemon(param.isNimbusDaemon())
+                .withTracked(id)
+                .withSimulatedTime(simulated)
+                .build();
     }
 
     /**
-     * Run with a local cluster
+     * Run with a local cluster.
+     *
      * @deprecated use ```
-     * try (LocalCluster cluster = new LocalCluster()) {
-     *  ...
-     * }
-     * ```
+     *     try (LocalCluster cluster = new LocalCluster()) {
+     *      ...
+     *     }
+     *     ```
      * @param code what to run
      */
     @Deprecated
@@ -181,12 +183,13 @@
     }
 
     /**
-     * Run with a local cluster
+     * Run with a local cluster.
+     *
      * @deprecated use ```
-     * try (LocalCluster cluster = new LocalCluster.Builder()....build()) {
-     *  ...
-     * }
-     * ```
+     *     try (LocalCluster cluster = new LocalCluster.Builder()....build()) {
+     *      ...
+     *     }
+     *     ```
      * @param param configs to set in the cluster
      * @param code what to run
      */
@@ -200,12 +203,13 @@
     }
 
     /**
-     * Run with a local cluster
+     * Run with a local cluster.
+     *
      * @deprecated use ```
-     * try (LocalCluster cluster = new LocalCluster.Builder()....build()) {
-     *  ...
-     * }
-     * ```
+     *     try (LocalCluster cluster = new LocalCluster.Builder()....build()) {
+     *      ...
+     *     }
+     *     ```
      * @param clusterConf some configs to set in the cluster
      */
     @Deprecated
@@ -235,12 +239,13 @@
     }
 
     /**
-     * Run with a local cluster
+     * Run with a local cluster.
+     *
      * @deprecated use ```
-     * try (LocalCluster cluster = new LocalCluster.Builder().withSimulatedTime().build()) {
-     *  ...
-     * }
-     * ```
+     *     try (LocalCluster cluster = new LocalCluster.Builder().withSimulatedTime().build()) {
+     *      ...
+     *     }
+     *     ```
      * @param code what to run
      */
     @Deprecated
@@ -249,12 +254,13 @@
     }
 
     /**
-     * Run with a local cluster
+     * Run with a local cluster.
+     *
      * @deprecated use ```
-     * try (LocalCluster cluster = new LocalCluster.Builder().withSimulatedTime()....build()) {
-     *  ...
-     * }
-     * ```
+     *     try (LocalCluster cluster = new LocalCluster.Builder().withSimulatedTime()....build()) {
+     *      ...
+     *     }
+     *     ```
      * @param param configs to set in the cluster
      * @param code what to run
      */
@@ -268,12 +274,13 @@
     }
 
     /**
-     * Run with a local cluster
+     * Run with a local cluster.
+     *
      * @deprecated use ```
-     * try (LocalCluster cluster = new LocalCluster.Builder().withTracked().build()) {
-     *  ...
-     * }
-     * ```
+     *     try (LocalCluster cluster = new LocalCluster.Builder().withTracked().build()) {
+     *      ...
+     *     }
+     *     ```
      * @param code what to run
      */
     @Deprecated
@@ -282,26 +289,13 @@
     }
 
     /**
-     * In a tracked topology some metrics are tracked.  This provides a way to get those metrics.
-     * This is intended mostly for internal testing.
-     * @param id the id of the tracked cluster
-     * @param key the name of the metric to get.
-     * @return the metric
-     */
-    @SuppressWarnings("unchecked")
-    @Deprecated
-    public static int globalAmt(String id, String key) {
-        LOG.warn("Reading tracked metrics for ID {}", id);
-        return ((ConcurrentHashMap<String, AtomicInteger>) RegisteredGlobalState.getState(id)).get(key).get();
-    }
-
-    /**
-     * Run with a local tracked cluster
+     * Run with a local tracked cluster.
+     *
      * @deprecated use ```
-     * try (LocalCluster cluster = new LocalCluster.Builder().withTracked()....build()) {
-     *  ...
-     * }
-     * ```
+     *     try (LocalCluster cluster = new LocalCluster.Builder().withTracked()....build()) {
+     *      ...
+     *     }
+     *     ```
      * @param param configs to set in the cluster
      * @param code what to run
      */
@@ -315,6 +309,21 @@
     }
 
     /**
+     * In a tracked topology some metrics are tracked.  This provides a way to get those metrics.
+     * This is intended mostly for internal testing.
+     *
+     * @param id the id of the tracked cluster
+     * @param key the name of the metric to get.
+     * @return the metric
+     */
+    @SuppressWarnings("unchecked")
+    @Deprecated
+    public static int globalAmt(String id, String key) {
+        LOG.warn("Reading tracked metrics for ID {}", id);
+        return ((ConcurrentHashMap<String, AtomicInteger>) RegisteredGlobalState.getState(id)).get(key).get();
+    }
+
+    /**
      * Track and capture a topology.
      * This is intended mostly for internal testing.
      */
@@ -324,10 +333,10 @@
     }
 
     /**
-     * Rewrites a topology so that all the tuples flowing through it are captured
+     * Rewrites a topology so that all the tuples flowing through it are captured.
      * @param topology the topology to rewrite
      * @return the modified topology and a new Bolt that can retrieve the
-     * captured tuples.
+     *     captured tuples.
      */
     public static CapturedTopology<StormTopology> captureTopology(StormTopology topology) {
         topology = topology.deepCopy(); //Don't modify the original
@@ -366,12 +375,11 @@
 
     /**
      * Run a topology to completion capturing all of the messages that are emitted.  This only works when all of the spouts are
-     * instances of {@link org.apache.storm.testing.CompletableSpout}
+     * instances of {@link org.apache.storm.testing.CompletableSpout}.
      * @param cluster the cluster to submit the topology to
      * @param topology the topology itself
-     * @return a map of the component to the list of tuples it emitted.
-     * @throws InterruptedException
-     * @throws TException on any error from nimbus.
+     * @return a map of the component to the list of tuples it emitted
+     * @throws TException on any error from nimbus
      */
     public static Map<String, List<FixedTuple>> completeTopology(ILocalCluster cluster, StormTopology topology) throws InterruptedException,
         TException {
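
 A hedged end-to-end sketch of this flow; the component id is hypothetical, and readTuples is the helper documented further below:

 ```java
 import java.util.List;
 import java.util.Map;
 import org.apache.storm.ILocalCluster;
 import org.apache.storm.Testing;
 import org.apache.storm.generated.StormTopology;
 import org.apache.storm.testing.FixedTuple;

 public class CompleteTopologySketch {
     static void runToCompletion(ILocalCluster cluster, StormTopology topology) throws Exception {
         // Runs the topology until every CompletableSpout is exhausted:
         Map<String, List<FixedTuple>> results = Testing.completeTopology(cluster, topology);
         // Values a (hypothetical) component emitted on the default stream:
         List<List<Object>> emitted = Testing.readTuples(results, "word-spout");
         System.out.println(emitted);
     }
 }
 ```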
@@ -383,15 +391,13 @@
      * instances of {@link org.apache.storm.testing.CompletableSpout} or are overwritten by MockedSources in param
      * @param cluster the cluster to submit the topology to
      * @param topology the topology itself
-     * @param param parameters to describe how to complete a topology.
-     * @return a map of the component to the list of tuples it emitted.
-     * @throws InterruptedException
+     * @param param parameters to describe how to complete a topology
+     * @return a map of the component to the list of tuples it emitted
      * @throws TException on any error from nimbus.
      */
     public static Map<String, List<FixedTuple>> completeTopology(ILocalCluster cluster, StormTopology topology,
                                                                  CompleteTopologyParam param) throws TException, InterruptedException {
         Map<String, List<FixedTuple>> ret = null;
-        IStormClusterState state = cluster.getClusterState();
         CapturedTopology<StormTopology> capTopo = captureTopology(topology);
         topology = capTopo.topology;
         String topoName = param.getTopologyName();
@@ -407,8 +413,10 @@
                 spouts.get(mocked.getKey()).set_spout_object(Thrift.serializeComponentObject(newSpout));
             }
         }
-        List<Object> spoutObjects = spouts.values().stream().
-            map((spec) -> Thrift.deserializeComponentObject(spec.get_spout_object())).collect(Collectors.toList());
+        List<Object> spoutObjects = spouts.values()
+                .stream()
+                .map((spec) -> Thrift.deserializeComponentObject(spec.get_spout_object()))
+                .collect(Collectors.toList());
 
         for (Object o : spoutObjects) {
             if (!(o instanceof CompletableSpout)) {
@@ -427,6 +435,7 @@
             cluster.advanceClusterTime(11);
         }
 
+        IStormClusterState state = cluster.getClusterState();
         String topoId = state.getTopoId(topoName).get();
         //Give the topology time to come up without using it to wait for the spouts to complete
         simulateWait(cluster);
@@ -435,28 +444,28 @@
             timeoutMs = TEST_TIMEOUT_MS;
         }
         whileTimeout(timeoutMs,
-                     () -> !isEvery(spoutObjects, (o) -> ((CompletableSpout) o).isExhausted()),
-                     () -> {
-                         try {
-                             simulateWait(cluster);
-                         } catch (Exception e) {
-                             throw new RuntimeException();
-                         }
-                     });
+            () -> !isEvery(spoutObjects, (o) -> ((CompletableSpout) o).isExhausted()),
+            () -> {
+                try {
+                    simulateWait(cluster);
+                } catch (Exception e) {
+                    throw new RuntimeException();
+                }
+            });
 
         KillOptions killOpts = new KillOptions();
         killOpts.set_wait_secs(0);
         cluster.killTopologyWithOpts(topoName, killOpts);
 
         whileTimeout(timeoutMs,
-                     () -> state.assignmentInfo(topoId, null) != null,
-                     () -> {
-                         try {
-                             simulateWait(cluster);
-                         } catch (Exception e) {
-                             throw new RuntimeException();
-                         }
-                     });
+            () -> state.assignmentInfo(topoId, null) != null,
+            () -> {
+                try {
+                    simulateWait(cluster);
+                } catch (Exception e) {
+                    throw new RuntimeException();
+                }
+            });
 
         if (param.getCleanupState()) {
             for (Object o : spoutObjects) {
@@ -471,7 +480,7 @@
     }
 
     /**
-     * If using simulated time simulate waiting for 10 seconds.  This is intended for internal testing only.
+     * If using simulated time simulate waiting for 10 seconds. This is intended for internal testing only.
      */
     public static void simulateWait(ILocalCluster cluster) throws InterruptedException {
         if (Time.isSimulating()) {
@@ -481,7 +490,7 @@
     }
 
     /**
-     * Get all of the tuples from a given component on the default stream
+     * Get all of the tuples from a given component on the default stream.
      * @param results the results of running a completed topology
      * @param componentId the id of the component to look at
      * @return a list of the tuple values.
@@ -491,7 +500,7 @@
     }
 
     /**
-     * Get all of the tuples from a given component on a given stream
+     * Get all of the tuples from a given component on a given stream.
      * @param results the results of running a completed topology
      * @param componentId the id of the component to look at
      * @param streamId the id of the stream to look for.
@@ -520,63 +529,63 @@
     }
 
     /**
-     * Simulated time wait for a tracked topology.  This is intended for internal testing
+     * Simulated time wait for a tracked topology.  This is intended for internal testing.
      */
     public static void trackedWait(CapturedTopology<TrackedTopology> topo) {
         topo.topology.trackedWait();
     }
 
     /**
-     * Simulated time wait for a tracked topology.  This is intended for internal testing
+     * Simulated time wait for a tracked topology.  This is intended for internal testing.
      */
     public static void trackedWait(CapturedTopology<TrackedTopology> topo, Integer amt) {
         topo.topology.trackedWait(amt);
     }
 
     /**
-     * Simulated time wait for a tracked topology.  This is intended for internal testing
+     * Simulated time wait for a tracked topology.  This is intended for internal testing.
      */
     public static void trackedWait(CapturedTopology<TrackedTopology> topo, Integer amt, Integer timeoutMs) {
         topo.topology.trackedWait(amt, timeoutMs);
     }
 
     /**
-     * Simulated time wait for a tracked topology.  This is intended for internal testing
+     * Simulated time wait for a tracked topology.  This is intended for internal testing.
      */
     public static void trackedWait(TrackedTopology topo) {
         topo.trackedWait();
     }
 
     /**
-     * Simulated time wait for a tracked topology.  This is intended for internal testing
+     * Simulated time wait for a tracked topology.  This is intended for internal testing.
      */
     public static void trackedWait(TrackedTopology topo, Integer amt) {
         topo.trackedWait(amt);
     }
 
     /**
-     * Simulated time wait for a tracked topology.  This is intended for internal testing
+     * Simulated time wait for a tracked topology.  This is intended for internal testing.
      */
     public static void trackedWait(TrackedTopology topo, Integer amt, Integer timeoutMs) {
         topo.trackedWait(amt, timeoutMs);
     }
 
     /**
-     * Simulated time wait for a cluster.  This is intended for internal testing
+     * Simulated time wait for a cluster.  This is intended for internal testing.
      */
     public static void advanceClusterTime(ILocalCluster cluster, Integer secs) throws InterruptedException {
         advanceClusterTime(cluster, secs, 1);
     }
 
     /**
-     * Simulated time wait for a cluster.  This is intended for internal testing
+     * Simulated time wait for a cluster.  This is intended for internal testing.
      */
     public static void advanceClusterTime(ILocalCluster cluster, Integer secs, Integer step) throws InterruptedException {
         cluster.advanceClusterTime(secs, step);
     }
 
     /**
-     * Count how many times each element appears in the Collection
+     * Count how many times each element appears in the Collection.
      * @param c a collection of values
      * @return a map of the unique values in c to the count of those values.
      */
@@ -627,7 +636,7 @@
     }
 
     /**
-     * Create a {@link org.apache.storm.tuple.Tuple} for use with testing
+     * Create a {@link org.apache.storm.tuple.Tuple} for use with testing.
      * @param values the values to appear in the tuple
      */
     public static Tuple testTuple(List<Object> values) {
@@ -635,7 +644,7 @@
     }
 
     /**
-     * Create a {@link org.apache.storm.tuple.Tuple} for use with testing
+     * Create a {@link org.apache.storm.tuple.Tuple} for use with testing.
      * @param values the values to appear in the tuple
      * @param param parameters describing more details about the tuple
      */
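
 A hedged one-liner for the helper above; Values is Storm's List<Object> convenience type:

 ```java
 import org.apache.storm.Testing;
 import org.apache.storm.tuple.Tuple;
 import org.apache.storm.tuple.Values;

 public class TestTupleSketch {
     public static void main(String[] args) {
         // Builds a Tuple carrying the given values:
         Tuple t = Testing.testTuple(new Values("storm", 42));
         System.out.println(t.getValues());
     }
 }
 ```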
@@ -691,8 +700,8 @@
     /**
      * Simply produces a boolean to see if a specific state is true or false.
      */
-    public static interface Condition {
-        public boolean exec();
+    public interface Condition {
+        boolean exec();
     }
 
     /**
diff --git a/storm-server/src/main/java/org/apache/storm/blobstore/BlobStoreUtils.java b/storm-server/src/main/java/org/apache/storm/blobstore/BlobStoreUtils.java
index 00d833f..a0644fa 100644
--- a/storm-server/src/main/java/org/apache/storm/blobstore/BlobStoreUtils.java
+++ b/storm-server/src/main/java/org/apache/storm/blobstore/BlobStoreUtils.java
@@ -49,6 +49,7 @@
         return BLOBSTORE_SUBTREE;
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     public static CuratorFramework createZKClient(Map<String, Object> conf, DaemonType type) {
         @SuppressWarnings("unchecked")
         List<String> zkServers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
@@ -191,7 +192,7 @@
                     out = null;
                 }
                 isSuccess = true;
-            } catch(FileNotFoundException fnf) {
+            } catch (FileNotFoundException fnf) {
                 LOG.warn("Blobstore file for key '{}' does not exist or got deleted before it could be downloaded.", key, fnf);
             } catch (IOException | AuthorizationException exception) {
                 throw new RuntimeException(exception);
diff --git a/storm-server/src/main/java/org/apache/storm/blobstore/FileBlobStoreImpl.java b/storm-server/src/main/java/org/apache/storm/blobstore/FileBlobStoreImpl.java
index 4552981..3c87a61 100644
--- a/storm-server/src/main/java/org/apache/storm/blobstore/FileBlobStoreImpl.java
+++ b/storm-server/src/main/java/org/apache/storm/blobstore/FileBlobStoreImpl.java
@@ -38,12 +38,13 @@
  * Very basic blob store impl with no ACL handling.
  */
 public class FileBlobStoreImpl {
-    private static final long FULL_CLEANUP_FREQ = 60 * 60 * 1000l;
+    private static final long FULL_CLEANUP_FREQ = 60 * 60 * 1000L;
     private static final int BUCKETS = 1024;
     private static final Logger LOG = LoggerFactory.getLogger(FileBlobStoreImpl.class);
     private static final Timer timer = new Timer("FileBlobStore cleanup thread", true);
     private File fullPath;
     private TimerTask cleanup = null;
+
     public FileBlobStoreImpl(File path, Map<String, Object> conf) throws IOException {
         LOG.info("Creating new blob store based in {}", path);
         fullPath = path;
@@ -66,13 +67,22 @@
     }
 
     /**
-     * @return all keys that are available for reading.
-     * @throws IOException on any error.
+     * List keys.
+     * @return all keys that are available for reading
+     * @throws IOException on any error
      */
     public Iterator<String> listKeys() throws IOException {
         return new KeyInHashDirIterator();
     }
 
+    protected Iterator<String> listKeys(File path) throws IOException {
+        String[] files = path.list();
+        if (files != null) {
+            return Arrays.asList(files).iterator();
+        }
+        return new LinkedList<String>().iterator();
+    }
+
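+
+ The null guard above matters because File.list() returns null for non-directories or on I/O errors; a hedged standalone sketch of the same fallback:
+
+ ```java
+ import java.io.File;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.Iterator;
+
+ public class ListKeysSketch {
+     static Iterator<String> listKeys(File path) {
+         String[] files = path.list();          // null if path is not a listable directory
+         return (files != null)
+                 ? Arrays.asList(files).iterator()
+                 : Collections.emptyIterator(); // empty iterator instead of an NPE
+     }
+ }
+ ```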
     /**
      * Get an input stream for reading a part.
      * @param key the key of the part to read.
@@ -103,7 +113,7 @@
     }
 
     /**
-     * Delete a key from the blob store
+     * Delete a key from the blob store.
      * @param key the key to delete
      * @throws IOException on any error
      */
@@ -164,14 +174,6 @@
         return ret.iterator();
     }
 
-    protected Iterator<String> listKeys(File path) throws IOException {
-        String[] files = path.list();
-        if (files != null) {
-            return Arrays.asList(files).iterator();
-        }
-        return new LinkedList<String>().iterator();
-    }
-
     protected void delete(File path) throws IOException {
         if (Files.exists(path.toPath())) {
 
diff --git a/storm-server/src/main/java/org/apache/storm/blobstore/KeySequenceNumber.java b/storm-server/src/main/java/org/apache/storm/blobstore/KeySequenceNumber.java
index 611d33c..39e1747 100644
--- a/storm-server/src/main/java/org/apache/storm/blobstore/KeySequenceNumber.java
+++ b/storm-server/src/main/java/org/apache/storm/blobstore/KeySequenceNumber.java
@@ -17,11 +17,11 @@
 import java.util.TreeSet;
 import org.apache.storm.generated.KeyNotFoundException;
 import org.apache.storm.nimbus.NimbusInfo;
-import org.apache.storm.utils.WrappedKeyNotFoundException;
 import org.apache.storm.shade.org.apache.curator.framework.CuratorFramework;
 import org.apache.storm.shade.org.apache.zookeeper.CreateMode;
 import org.apache.storm.shade.org.apache.zookeeper.KeeperException;
 import org.apache.storm.shade.org.apache.zookeeper.ZooDefs;
+import org.apache.storm.utils.WrappedKeyNotFoundException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,16 +39,16 @@
  * nimbus and the non-leader nimbus syncs after a callback is triggered by attempting
  * to download the blob and finally updates its state inside the zookeeper.
  *
- * A watch is placed on the /storm/blobstore/key1 and the znodes leader:8080-1 and
+ * <p>A watch is placed on the /storm/blobstore/key1 and the znodes leader:8080-1 and
  * non-leader:8080-1 are ephemeral which implies that these nodes exist only until the
  * connection between the corresponding nimbus and the zookeeper persists. If the
  * nimbus crashes, the node under /storm/blobstore/key1 disappears.
  *
- * The sequence number for the keys are handed over based on the following scenario:
+ * <p>The sequence numbers for the keys are handed over based on the following scenarios.
 * Let's assume there are three nimbodes up and running, one being the leader and the
 * others being non-leaders.
  *
- * 1. Create is straight forward.
+ * <p>1. Create is straightforward.
 * Check whether the znode -> /storm/blobstore/key1 exists; its absence implies
 * the blob has not been created yet. If not created, it creates it and updates the zookeeper
 * states under /storm/blobstore/key1 and /storm/blobstoremaxkeysequencenumber/key1.
@@ -58,65 +58,65 @@
  * indicating the true value of number of updates for a blob. This node helps to maintain sanity in case
  * leadership changes due to crashing.
  *
- * 2. Delete does not require to hand over the sequence number.
+ * <p>2. Delete does not require handing over the sequence number.
  *
- * 3. Finally, the update has few scenarios.
+ * <p>3. Finally, the update has a few scenarios.
  *
- *  The class implements a TreeSet. The basic idea is if all the nimbodes have the same
- *  sequence number for the blob, then the number of elements in the set is 1 which holds
- *  the latest value of sequence number. If the number of elements are greater than 1 then it
- *  implies that there is sequence mismatch and there is need for syncing the blobs across
- *  nimbodes.
+ * <p>The class implements a TreeSet. The basic idea is that if all the nimbodes have the same
+ * sequence number for the blob, then the set contains a single element, which holds
+ * the latest value of the sequence number. If the set contains more than one element, it
+ * implies a sequence mismatch, and the blobs need to be synced across
+ * nimbodes.
  *
- *  The logic for handing over sequence numbers based on the state are described as follows
- *  Here consider Nimbus-1 alias as N1 and Nimbus-2 alias as N2.
- *  Scenario 1:
- *  Example: Normal create/update scenario
- *  Operation     Nimbus-1:state     Nimbus-2:state     Seq-Num-Nimbus-1  Seq-Num-Nimbus-2          Max-Seq-Num
- *  Create-Key1   alive - Leader     alive              1                                           1
- *  Sync          alive - Leader     alive              1                 1 (callback -> download)  1
- *  Update-Key1   alive - Leader     alive              2                 1                         2
- *  Sync          alive - Leader     alive              2                 2 (callback -> download)  2
+ * <p>The logic for handing over sequence numbers based on the state is described as follows.
+ * Here Nimbus-1 is aliased as N1 and Nimbus-2 as N2.
+ * Scenario 1:
+ * Example: Normal create/update scenario
+ * Operation     Nimbus-1:state     Nimbus-2:state     Seq-Num-Nimbus-1  Seq-Num-Nimbus-2          Max-Seq-Num
+ * Create-Key1   alive - Leader     alive              1                                           1
+ * Sync          alive - Leader     alive              1                 1 (callback -> download)  1
+ * Update-Key1   alive - Leader     alive              2                 1                         2
+ * Sync          alive - Leader     alive              2                 2 (callback -> download)  2
  *
- *  Scenario 2:
- *  Example: Leader nimbus crash followed by leader election, update and ex-leader restored again
- *  Operation     Nimbus-1:state     Nimbus-2:state     Seq-Num-Nimbus-1  Seq-Num-Nimbus-2          Max-Seq-Num
- *  Create        alive - Leader     alive              1                                           1
- *  Sync          alive - Leader     alive              1                 1 (callback -> download)  1
- *  Update        alive - Leader     alive              2                 1                         2
- *  Sync          alive - Leader     alive              2                 2 (callback -> download)  2
- *  Update        alive - Leader     alive              3                 2                         3
- *  Crash         crash - Leader     alive              3                 2                         3
- *  New - Leader  crash              alive - Leader     3 (Invalid)       2                         3
- *  Update        crash              alive - Leader     3 (Invalid)       4 (max-seq-num + 1)       4
- *  N1-Restored   alive              alive - Leader     0                 4                         4
- *  Sync          alive              alive - Leader     4                 4                         4
+ * <p>Scenario 2:
+ * Example: Leader nimbus crash followed by leader election, update and ex-leader restored again
+ * Operation     Nimbus-1:state     Nimbus-2:state     Seq-Num-Nimbus-1  Seq-Num-Nimbus-2          Max-Seq-Num
+ * Create        alive - Leader     alive              1                                           1
+ * Sync          alive - Leader     alive              1                 1 (callback -> download)  1
+ * Update        alive - Leader     alive              2                 1                         2
+ * Sync          alive - Leader     alive              2                 2 (callback -> download)  2
+ * Update        alive - Leader     alive              3                 2                         3
+ * Crash         crash - Leader     alive              3                 2                         3
+ * New - Leader  crash              alive - Leader     3 (Invalid)       2                         3
+ * Update        crash              alive - Leader     3 (Invalid)       4 (max-seq-num + 1)       4
+ * N1-Restored   alive              alive - Leader     0                 4                         4
+ * Sync          alive              alive - Leader     4                 4                         4
  *
- *  Scenario 3:
- *  Example: Leader nimbus crash followed by leader election, update and ex-leader restored again
- *  Operation     Nimbus-1:state     Nimbus-2:state     Seq-Num-Nimbus-1  Seq-Num-Nimbus-2          Max-Seq-Num
- *  Create        alive - Leader     alive              1                                           1
- *  Sync          alive - Leader     alive              1                 1 (callback -> download)  1
- *  Update        alive - Leader     alive              2                 1                         2
- *  Sync          alive - Leader     alive              2                 2 (callback -> download)  2
- *  Update        alive - Leader     alive              3                 2                         3
- *  Crash         crash - Leader     alive              3                 2                         3
- *  Elect Leader  crash              alive - Leader     3 (Invalid)       2                         3
- *  N1-Restored   alive              alive - Leader     3                 2                         3
- *  Read/Update   alive              alive - Leader     3                 4 (Downloads from N1)     4
- *  Sync          alive              alive - Leader     4 (callback)      4                         4
- *  Here the download is triggered whenever an operation corresponding to the blob is triggered on the
- *  nimbus like a read or update operation. Here, in the read/update call it is hard to know which call
- *  is read or update. Hence, by incrementing the sequence number to max-seq-num + 1 we ensure that the
- *  synchronization happens appropriately and all nimbodes have the same blob.
+ * <p>Scenario 3:
+ * Example: Leader nimbus crash followed by leader election, update and ex-leader restored again
+ * Operation     Nimbus-1:state     Nimbus-2:state     Seq-Num-Nimbus-1  Seq-Num-Nimbus-2          Max-Seq-Num
+ * Create        alive - Leader     alive              1                                           1
+ * Sync          alive - Leader     alive              1                 1 (callback -> download)  1
+ * Update        alive - Leader     alive              2                 1                         2
+ * Sync          alive - Leader     alive              2                 2 (callback -> download)  2
+ * Update        alive - Leader     alive              3                 2                         3
+ * Crash         crash - Leader     alive              3                 2                         3
+ * Elect Leader  crash              alive - Leader     3 (Invalid)       2                         3
+ * N1-Restored   alive              alive - Leader     3                 2                         3
+ * Read/Update   alive              alive - Leader     3                 4 (Downloads from N1)     4
+ * Sync          alive              alive - Leader     4 (callback)      4                         4
+ * Here the download is triggered whenever an operation on the blob, such as a read or an update,
+ * reaches the nimbus. Inside that call it is hard to know whether it is a read or an update.
+ * Hence, by incrementing the sequence number to max-seq-num + 1 we ensure that the
+ * synchronization happens appropriately and all nimbodes have the same blob.
  */
 public class KeySequenceNumber {
     private static final Logger LOG = LoggerFactory.getLogger(KeySequenceNumber.class);
-    private final String BLOBSTORE_MAX_KEY_SEQUENCE_SUBTREE = "/blobstoremaxkeysequencenumber";
+    private static final String BLOBSTORE_MAX_KEY_SEQUENCE_SUBTREE = "/blobstoremaxkeysequencenumber";
     private final String key;
     private final NimbusInfo nimbusInfo;
-    private final int INT_CAPACITY = 4;
-    private final int INITIAL_SEQUENCE_NUMBER = 1;
+    private static final int INT_CAPACITY = 4;
+    private static final int INITIAL_SEQUENCE_NUMBER = 1;
 
     public KeySequenceNumber(String key, NimbusInfo nimbusInfo) {
         this.key = key;
diff --git a/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStore.java b/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStore.java
index b6bfd47..a8f519d 100644
--- a/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStore.java
+++ b/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStore.java
@@ -14,6 +14,12 @@
 
 package org.apache.storm.blobstore;
 
+import static org.apache.storm.blobstore.BlobStoreAclHandler.ADMIN;
+import static org.apache.storm.blobstore.BlobStoreAclHandler.READ;
+import static org.apache.storm.blobstore.BlobStoreAclHandler.WRITE;
+import static org.apache.storm.daemon.nimbus.Nimbus.NIMBUS_SUBJECT;
+import static org.apache.storm.daemon.nimbus.Nimbus.getVersionForKey;
+
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -49,25 +55,22 @@
 import org.apache.storm.utils.Utils;
 import org.apache.storm.utils.WrappedKeyAlreadyExistsException;
 import org.apache.storm.utils.WrappedKeyNotFoundException;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.storm.blobstore.BlobStoreAclHandler.*;
-import static org.apache.storm.daemon.nimbus.Nimbus.NIMBUS_SUBJECT;
-import static org.apache.storm.daemon.nimbus.Nimbus.getVersionForKey;
-
 /**
  * Provides a local file system backed blob store implementation for Nimbus.
  *
- * For a local blob store the user and the supervisor use NimbusBlobStore Client API in order to talk to nimbus through thrift.
+ * <p>For a local blob store, the user and the supervisor use the NimbusBlobStore client API to talk to nimbus through thrift.
  * The authentication and authorization here is based on the subject.
  * We currently have NIMBUS_ADMINS and SUPERVISOR_ADMINS configuration. NIMBUS_ADMINS are given READ, WRITE and ADMIN
  * access whereas the SUPERVISOR_ADMINS are given READ access in order to read and download the blobs from the nimbus.
  *
- * The ACLs for the blob store are validated against whether the subject is a NIMBUS_ADMIN, SUPERVISOR_ADMIN or USER
+ * <p>The ACLs for the blob store are validated by checking whether the subject is a NIMBUS_ADMIN, SUPERVISOR_ADMIN or USER
  * who has read, write or admin privileges in order to perform respective operations on the blob.
  *
- * For local blob store
+ * <p>For a local blob store:
  * 1. The USER interacts with nimbus to upload and access blobs through NimbusBlobStore Client API.
  * 2. The USER sets the ACLs, and the blob access is validated against these ACLs.
  * 3. The SUPERVISOR interacts with nimbus through the NimbusBlobStore Client API to download the blobs.
@@ -78,9 +81,9 @@
     public static final Logger LOG = LoggerFactory.getLogger(LocalFsBlobStore.class);
     private static final String DATA_PREFIX = "data_";
     private static final String META_PREFIX = "meta_";
-    private final String BLOBSTORE_SUBTREE = "/blobstore/";
+    private static final String BLOBSTORE_SUBTREE = "/blobstore/";
     private final int allPermissions = READ | WRITE | ADMIN;
-    protected BlobStoreAclHandler _aclHandler;
+    protected BlobStoreAclHandler aclHandler;
     private NimbusInfo nimbusInfo;
     private FileBlobStoreImpl fbs;
     private Map<String, Object> conf;
@@ -103,7 +106,7 @@
         } catch (IOException e) {
             throw new RuntimeException(e);
         }
-        _aclHandler = new BlobStoreAclHandler(conf);
+        aclHandler = new BlobStoreAclHandler(conf);
         try {
             this.stormClusterState = ClusterUtils.mkStormClusterState(conf, new ClusterStateContext(DaemonType.NIMBUS, conf));
         } catch (Exception e) {
@@ -115,8 +118,6 @@
 
     /**
      * Sets up blobstore state for all current keys.
-     * @throws KeyNotFoundException
-     * @throws AuthorizationException
      */
     private void setupBlobstore() throws AuthorizationException, KeyNotFoundException {
         IStormClusterState state = stormClusterState;
@@ -198,7 +199,7 @@
                     throw new RuntimeException(e);
                 }
             }
-        }, 0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CODE_SYNC_FREQ_SECS))*1000);
+        }, 0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CODE_SYNC_FREQ_SECS)) * 1000);
 
     }
 
@@ -207,18 +208,18 @@
         KeyAlreadyExistsException {
         LOG.debug("Creating Blob for key {}", key);
         validateKey(key);
-        _aclHandler.normalizeSettableBlobMeta(key, meta, who, allPermissions);
+        aclHandler.normalizeSettableBlobMeta(key, meta, who, allPermissions);
         BlobStoreAclHandler.validateSettableACLs(key, meta.get_acl());
-        _aclHandler.hasPermissions(meta.get_acl(), allPermissions, who, key);
+        aclHandler.hasPermissions(meta.get_acl(), allPermissions, who, key);
         if (fbs.exists(DATA_PREFIX + key)) {
             throw new WrappedKeyAlreadyExistsException(key);
         }
-        BlobStoreFileOutputStream mOut = null;
+        BlobStoreFileOutputStream outputStream = null;
         try {
-            mOut = new BlobStoreFileOutputStream(fbs.write(META_PREFIX + key, true));
-            mOut.write(Utils.thriftSerialize(meta));
-            mOut.close();
-            mOut = null;
+            outputStream = new BlobStoreFileOutputStream(fbs.write(META_PREFIX + key, true));
+            outputStream.write(Utils.thriftSerialize(meta));
+            outputStream.close();
+            outputStream = null;
             this.stormClusterState.setupBlob(key, this.nimbusInfo, getVersionForKey(key, this.nimbusInfo, zkClient));
             return new BlobStoreFileOutputStream(fbs.write(DATA_PREFIX + key, true));
         } catch (IOException e) {
@@ -226,9 +227,9 @@
         } catch (KeyNotFoundException e) {
             throw new RuntimeException(e);
         } finally {
-            if (mOut != null) {
+            if (outputStream != null) {
                 try {
-                    mOut.cancel();
+                    outputStream.cancel();
                 } catch (IOException e) {
                     //Ignored
                 }
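
 The null-out-after-close idiom above is what keeps cancel() on the failure path only; a hedged distilled sketch, with the stream type simplified to a generic OutputStream:

 ```java
 import java.io.IOException;
 import java.io.OutputStream;

 public class CancelOnFailureSketch {
     static void writeThenCommit(OutputStream out, byte[] payload) throws IOException {
         OutputStream pending = out;
         try {
             pending.write(payload);
             pending.close();
             pending = null;            // success: nothing left to roll back
         } finally {
             if (pending != null) {     // reached only if write()/close() threw
                 try {
                     pending.close();   // stand-in for BlobStoreFileOutputStream.cancel()
                 } catch (IOException ignored) {
                     // best effort, mirroring the patch
                 }
             }
         }
     }
 }
 ```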
@@ -285,7 +286,7 @@
             checkForBlobUpdate(key);
         }
         SettableBlobMeta meta = getStoredBlobMeta(key);
-        _aclHandler.validateUserCanReadMeta(meta.get_acl(), who, key);
+        aclHandler.validateUserCanReadMeta(meta.get_acl(), who, key);
         ReadableBlobMeta rbm = new ReadableBlobMeta();
         rbm.set_settable(meta);
         try {
@@ -298,9 +299,7 @@
     }
 
     /**
-     * Sets leader elector (only used by LocalFsBlobStore to help sync blobs between Nimbi
-     *
-     * @param leaderElector
+     * Sets the leader elector (only used by LocalFsBlobStore to help sync blobs between Nimbi).
      */
     @Override
     public void setLeaderElector(ILeaderElector leaderElector) {
@@ -311,22 +310,22 @@
     public void setBlobMeta(String key, SettableBlobMeta meta, Subject who) throws AuthorizationException, KeyNotFoundException {
         validateKey(key);
         checkForBlobOrDownload(key);
-        _aclHandler.normalizeSettableBlobMeta(key, meta, who, ADMIN);
+        aclHandler.normalizeSettableBlobMeta(key, meta, who, ADMIN);
         BlobStoreAclHandler.validateSettableACLs(key, meta.get_acl());
         SettableBlobMeta orig = getStoredBlobMeta(key);
-        _aclHandler.hasPermissions(orig.get_acl(), ADMIN, who, key);
-        BlobStoreFileOutputStream mOut = null;
+        aclHandler.hasPermissions(orig.get_acl(), ADMIN, who, key);
+        BlobStoreFileOutputStream outputStream = null;
         try {
-            mOut = new BlobStoreFileOutputStream(fbs.write(META_PREFIX + key, false));
-            mOut.write(Utils.thriftSerialize(meta));
-            mOut.close();
-            mOut = null;
+            outputStream = new BlobStoreFileOutputStream(fbs.write(META_PREFIX + key, false));
+            outputStream.write(Utils.thriftSerialize(meta));
+            outputStream.close();
+            outputStream = null;
         } catch (IOException e) {
             throw new RuntimeException(e);
         } finally {
-            if (mOut != null) {
+            if (outputStream != null) {
                 try {
-                    mOut.cancel();
+                    outputStream.cancel();
                 } catch (IOException e) {
                     //Ignored
                 }
@@ -338,7 +337,7 @@
     public void deleteBlob(String key, Subject who) throws AuthorizationException, KeyNotFoundException {
         validateKey(key);
 
-        if (!_aclHandler.checkForValidUsers(who, WRITE)) {
+        if (!aclHandler.checkForValidUsers(who, WRITE)) {
             // need to get ACL from meta
             LOG.debug("Retrieving meta to get ACL info... key: {} subject: {}", key, who);
 
@@ -368,7 +367,7 @@
     private void checkPermission(String key, Subject who, int mask) throws KeyNotFoundException, AuthorizationException {
         checkForBlobOrDownload(key);
         SettableBlobMeta meta = getStoredBlobMeta(key);
-        _aclHandler.hasPermissions(meta.get_acl(), mask, who, key);
+        aclHandler.hasPermissions(meta.get_acl(), mask, who, key);
     }
 
     private void deleteKeyIgnoringFileNotFound(String key) throws IOException {
@@ -390,7 +389,7 @@
             checkForBlobUpdate(key);
         }
         SettableBlobMeta meta = getStoredBlobMeta(key);
-        _aclHandler.hasPermissions(meta.get_acl(), READ, who, key);
+        aclHandler.hasPermissions(meta.get_acl(), READ, who, key);
         try {
             return new BlobStoreFileInputStream(fbs.read(DATA_PREFIX + key));
         } catch (IOException e) {
@@ -423,7 +422,7 @@
         int replicationCount = 0;
         validateKey(key);
         SettableBlobMeta meta = getStoredBlobMeta(key);
-        _aclHandler.hasPermissions(meta.get_acl(), READ, who, key);
+        aclHandler.hasPermissions(meta.get_acl(), READ, who, key);
         if (zkClient.checkExists().forPath(BLOBSTORE_SUBTREE + key) == null) {
             return 0;
         }
diff --git a/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStoreFile.java b/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStoreFile.java
index d160ba6..c4f3164 100644
--- a/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStoreFile.java
+++ b/storm-server/src/main/java/org/apache/storm/blobstore/LocalFsBlobStoreFile.java
@@ -25,60 +25,60 @@
 
 public class LocalFsBlobStoreFile extends BlobStoreFile {
 
-    private final String _key;
-    private final boolean _isTmp;
-    private final File _path;
-    private final boolean _mustBeNew;
-    private Long _modTime = null;
+    private final String key;
+    private final boolean isTmp;
+    private final File path;
+    private final boolean mustBeNew;
+    private Long modTime = null;
     private SettableBlobMeta meta;
 
     public LocalFsBlobStoreFile(File base, String name) {
         if (BlobStoreFile.BLOBSTORE_DATA_FILE.equals(name)) {
-            _isTmp = false;
+            isTmp = false;
         } else {
             Matcher m = TMP_NAME_PATTERN.matcher(name);
             if (!m.matches()) {
                 throw new IllegalArgumentException("File name does not match '" + name + "' !~ " + TMP_NAME_PATTERN);
             }
-            _isTmp = true;
+            isTmp = true;
         }
-        _key = base.getName();
-        _path = new File(base, name);
-        _mustBeNew = false;
+        key = base.getName();
+        path = new File(base, name);
+        mustBeNew = false;
     }
 
     public LocalFsBlobStoreFile(File base, boolean isTmp, boolean mustBeNew) {
-        _key = base.getName();
-        _isTmp = isTmp;
-        _mustBeNew = mustBeNew;
-        if (_isTmp) {
-            _path = new File(base, System.currentTimeMillis() + TMP_EXT);
+        key = base.getName();
+        this.isTmp = isTmp;
+        this.mustBeNew = mustBeNew;
+        if (this.isTmp) {
+            path = new File(base, System.currentTimeMillis() + TMP_EXT);
         } else {
-            _path = new File(base, BlobStoreFile.BLOBSTORE_DATA_FILE);
+            path = new File(base, BlobStoreFile.BLOBSTORE_DATA_FILE);
         }
     }
 
     @Override
     public void delete() throws IOException {
-        _path.delete();
+        path.delete();
     }
 
     @Override
     public boolean isTmp() {
-        return _isTmp;
+        return isTmp;
     }
 
     @Override
     public String getKey() {
-        return _key;
+        return key;
     }
 
     @Override
     public long getModTime() throws IOException {
-        if (_modTime == null) {
-            _modTime = _path.lastModified();
+        if (modTime == null) {
+            modTime = path.lastModified();
         }
-        return _modTime;
+        return modTime;
     }
 
     @Override
@@ -86,7 +86,7 @@
         if (isTmp()) {
             throw new IllegalStateException("Cannot read from a temporary part file.");
         }
-        return new FileInputStream(_path);
+        return new FileInputStream(path);
     }
 
     @Override
@@ -96,16 +96,16 @@
         }
         boolean success = false;
         try {
-            success = _path.createNewFile();
+            success = path.createNewFile();
         } catch (IOException e) {
             //Try to create the parent directory, may not work
-            _path.getParentFile().mkdirs();
-            success = _path.createNewFile();
+            path.getParentFile().mkdirs();
+            success = path.createNewFile();
         }
         if (!success) {
-            throw new IOException(_path + " already exists");
+            throw new IOException(path + " already exists");
         }
-        return new FileOutputStream(_path);
+        return new FileOutputStream(path);
     }
 
     @Override
@@ -114,11 +114,11 @@
             throw new IllegalStateException("Can only write to a temporary part file.");
         }
 
-        File dest = new File(_path.getParentFile(), BlobStoreFile.BLOBSTORE_DATA_FILE);
-        if (_mustBeNew) {
-            Files.move(_path.toPath(), dest.toPath(), StandardCopyOption.ATOMIC_MOVE);
+        File dest = new File(path.getParentFile(), BlobStoreFile.BLOBSTORE_DATA_FILE);
+        if (mustBeNew) {
+            Files.move(path.toPath(), dest.toPath(), StandardCopyOption.ATOMIC_MOVE);
         } else {
-            Files.move(_path.toPath(), dest.toPath(), StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
+            Files.move(path.toPath(), dest.toPath(), StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
         }
     }
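
 The two move flavors above encode the create-vs-update contract: a must-be-new blob is not meant to clobber existing data, while an update may replace it. A hedged standalone sketch (paths are hypothetical, and the exact behavior of ATOMIC_MOVE around an existing target is platform-dependent):

 ```java
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.StandardCopyOption;

 public class CommitMoveSketch {
     static void commit(Path tmpPart, boolean mustBeNew) throws IOException {
         Path dest = tmpPart.resolveSibling("data"); // stand-in for BLOBSTORE_DATA_FILE
         if (mustBeNew) {
             // Create path: relies on the move not replacing an existing dest.
             Files.move(tmpPart, dest, StandardCopyOption.ATOMIC_MOVE);
         } else {
             // Update path: explicitly allow replacing an existing blob file.
             Files.move(tmpPart, dest, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
         }
     }
 }
 ```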
 
@@ -142,12 +142,12 @@
 
     @Override
     public String toString() {
-        return _path + ":" + (_isTmp ? "tmp" : BlobStoreFile.BLOBSTORE_DATA_FILE) + ":" + _key;
+        return path + ":" + (isTmp ? "tmp" : BlobStoreFile.BLOBSTORE_DATA_FILE) + ":" + key;
     }
 
     @Override
     public long getFileLength() {
-        return _path.length();
+        return path.length();
     }
 }
 
diff --git a/storm-server/src/main/java/org/apache/storm/container/ResourceIsolationInterface.java b/storm-server/src/main/java/org/apache/storm/container/ResourceIsolationInterface.java
index 8e2ae3c..89df29a 100644
--- a/storm-server/src/main/java/org/apache/storm/container/ResourceIsolationInterface.java
+++ b/storm-server/src/main/java/org/apache/storm/container/ResourceIsolationInterface.java
@@ -23,7 +23,7 @@
 public interface ResourceIsolationInterface {
 
     /**
-     * Called when starting up
+     * Called when starting up.
      *
      * @param conf the cluster config
      * @throws IOException on any error.
@@ -85,6 +85,7 @@
     long getMemoryUsage(String workerId) throws IOException;
 
     /**
+     * Get the system free memory in MB.
      * @return The amount of memory in bytes that are free on the system. This might not be the entire box, it might be
      *     within a parent resource group.
      * @throws IOException on any error.
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/drpc/BlockingOutstandingRequest.java b/storm-server/src/main/java/org/apache/storm/daemon/drpc/BlockingOutstandingRequest.java
index e11e363..c2395b1 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/drpc/BlockingOutstandingRequest.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/drpc/BlockingOutstandingRequest.java
@@ -26,42 +26,42 @@
 
 public class BlockingOutstandingRequest extends OutstandingRequest {
     public static final RequestFactory<BlockingOutstandingRequest> FACTORY = BlockingOutstandingRequest::new;
-    private Semaphore _sem;
-    private volatile String _result = null;
-    private volatile DRPCExecutionException _e = null;
+    private Semaphore sem;
+    private volatile String result = null;
+    private volatile DRPCExecutionException drpcExecutionException = null;
 
     public BlockingOutstandingRequest(String function, DRPCRequest req) {
         super(function, req);
-        _sem = new Semaphore(0);
+        sem = new Semaphore(0);
     }
 
     public String getResult() throws DRPCExecutionException {
         try {
-            _sem.acquire();
+            sem.acquire();
         } catch (InterruptedException e) {
             //Ignored
         }
 
-        if (_result != null) {
-            return _result;
+        if (result != null) {
+            return result;
         }
 
-        if (_e == null) {
-            _e = new WrappedDRPCExecutionException("Internal Error: No Result and No Exception");
-            _e.set_type(DRPCExceptionType.INTERNAL_ERROR);
+        if (drpcExecutionException == null) {
+            drpcExecutionException = new WrappedDRPCExecutionException("Internal Error: No Result and No Exception");
+            drpcExecutionException.set_type(DRPCExceptionType.INTERNAL_ERROR);
         }
-        throw _e;
+        throw drpcExecutionException;
     }
 
     @Override
     public void returnResult(String result) {
-        _result = result;
-        _sem.release();
+        this.result = result;
+        sem.release();
     }
 
     @Override
     public void fail(DRPCExecutionException e) {
-        _e = e;
-        _sem.release();
+        drpcExecutionException = e;
+        sem.release();
     }
 }
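
The class above is the whole blocking handoff: `getResult()` parks on a zero-permit semaphore until `returnResult(...)` or `fail(...)` publishes an outcome and releases the permit. A minimal sketch of the same pattern, with illustrative names rather than Storm's API (note the real class deliberately swallows the `InterruptedException` on acquire):

```java
import java.util.concurrent.Semaphore;

// Illustrative stand-in, not Storm code: a zero-permit semaphore parks the
// reader until a writer publishes either a value or an error.
public class BlockingCell<T> {
    private final Semaphore sem = new Semaphore(0);
    private volatile T result;
    private volatile Exception error;

    public T get() throws Exception {
        sem.acquire();                       // blocks until a release()
        if (result != null) {
            return result;
        }
        // Mirror the defensive branch above: never return with nothing set.
        throw error != null ? error : new IllegalStateException("no result and no error");
    }

    public void complete(T value) {
        result = value;
        sem.release();                       // wakes one waiting get()
    }

    public void fail(Exception e) {
        error = e;
        sem.release();
    }
}
```
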
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPC.java b/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPC.java
index 2d853f3..23183f0 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPC.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPC.java
@@ -46,6 +46,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public class DRPC implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(DRPC.class);
     private static final DRPCRequest NOTHING_REQUEST = new DRPCRequest("", "");
@@ -66,14 +67,14 @@
     }
 
     //Waiting to be fetched
-    private final ConcurrentHashMap<String, ConcurrentLinkedQueue<OutstandingRequest>> _queues =
-        new ConcurrentHashMap<>();
+    private final ConcurrentHashMap<String, ConcurrentLinkedQueue<OutstandingRequest>> queues =
+            new ConcurrentHashMap<>();
     //Waiting to be returned
-    private final ConcurrentHashMap<String, OutstandingRequest> _requests =
-        new ConcurrentHashMap<>();
+    private final ConcurrentHashMap<String, OutstandingRequest> requests =
+            new ConcurrentHashMap<>();
     private final Timer timer = new Timer("DRPC-CLEANUP-TIMER", true);
-    private final AtomicLong _ctr = new AtomicLong(0);
-    private final IAuthorizer _auth;
+    private final AtomicLong ctr = new AtomicLong(0);
+    private final IAuthorizer auth;
 
     public DRPC(StormMetricsRegistry metricsRegistry, Map<String, Object> conf) {
         this(metricsRegistry, mkAuthorizationHandler((String) conf.get(DaemonConfig.DRPC_AUTHORIZER), conf),
@@ -81,7 +82,7 @@
     }
 
     public DRPC(StormMetricsRegistry metricsRegistry, IAuthorizer auth, long timeoutMs) {
-        _auth = auth;
+        this.auth = auth;
         this.meterServerTimedOut = metricsRegistry.registerMeter("drpc:num-server-timedout-requests");
         this.meterExecuteCalls = metricsRegistry.registerMeter("drpc:num-execute-calls");
         this.meterResultCalls = metricsRegistry.registerMeter("drpc:num-result-calls");
@@ -135,22 +136,22 @@
     }
 
     private void checkAuthorization(String operation, String function) throws AuthorizationException {
-        checkAuthorization(ReqContext.context(), _auth, operation, function);
+        checkAuthorization(ReqContext.context(), auth, operation, function);
     }
 
     private void checkAuthorizationNoLog(String operation, String function) throws AuthorizationException {
-        checkAuthorization(ReqContext.context(), _auth, operation, function, false);
+        checkAuthorization(ReqContext.context(), auth, operation, function, false);
     }
 
     private void cleanup(String id) {
-        OutstandingRequest req = _requests.remove(id);
+        OutstandingRequest req = requests.remove(id);
         if (req != null && !req.wasFetched()) {
-            _queues.get(req.getFunction()).remove(req);
+            queues.get(req.getFunction()).remove(req);
         }
     }
 
     private void cleanupAll(long timeoutMs, DRPCExecutionException exp) {
-        for (Entry<String, OutstandingRequest> e : _requests.entrySet()) {
+        for (Entry<String, OutstandingRequest> e : requests.entrySet()) {
             OutstandingRequest req = e.getValue();
             if (req.isTimedOut(timeoutMs)) {
                 req.fail(exp);
@@ -161,17 +162,17 @@
     }
 
     private String nextId() {
-        return String.valueOf(_ctr.incrementAndGet());
+        return String.valueOf(ctr.incrementAndGet());
     }
 
     private ConcurrentLinkedQueue<OutstandingRequest> getQueue(String function) {
         if (function == null) {
             throw new IllegalArgumentException("The function for a request cannot be null");
         }
-        ConcurrentLinkedQueue<OutstandingRequest> queue = _queues.get(function);
+        ConcurrentLinkedQueue<OutstandingRequest> queue = queues.get(function);
         if (queue == null) {
-            _queues.putIfAbsent(function, new ConcurrentLinkedQueue<>());
-            queue = _queues.get(function);
+            queues.putIfAbsent(function, new ConcurrentLinkedQueue<>());
+            queue = queues.get(function);
         }
         return queue;
     }
@@ -179,7 +180,7 @@
     public void returnResult(String id, String result) throws AuthorizationException {
         meterResultCalls.mark();
         LOG.debug("Got a result {} {}", id, result);
-        OutstandingRequest req = _requests.get(id);
+        OutstandingRequest req = requests.get(id);
         if (req != null) {
             checkAuthorization("result", req.getFunction());
             req.returnResult(result);
@@ -204,7 +205,7 @@
     public void failRequest(String id, DRPCExecutionException e) throws AuthorizationException {
         meterFailRequestCalls.mark();
         LOG.debug("Got a fail {}", id);
-        OutstandingRequest req = _requests.get(id);
+        OutstandingRequest req = requests.get(id);
         if (req != null) {
             checkAuthorization("failRequest", req.getFunction());
             if (e == null) {
@@ -221,7 +222,7 @@
         String id = nextId();
         LOG.debug("Execute {} {}", functionName, funcArgs);
         T req = factory.mkRequest(functionName, new DRPCRequest(funcArgs, id));
-        _requests.put(id, req);
+        requests.put(id, req);
         ConcurrentLinkedQueue<OutstandingRequest> q = getQueue(functionName);
         q.add(req);
         return req;
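
One detail in `getQueue(...)` above: the `putIfAbsent` followed by a second `get` is the classic pre-Java-8 idiom for materializing a per-key queue exactly once under concurrency. A hedged sketch of the equivalent single-call form (an alternative spelling, not what this patch does; the class name is illustrative):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

// Illustrative stand-in for DRPC's queue map; the element type is simplified.
class FunctionQueues {
    private final ConcurrentHashMap<String, ConcurrentLinkedQueue<Object>> queues =
            new ConcurrentHashMap<>();

    ConcurrentLinkedQueue<Object> getQueue(String function) {
        if (function == null) {
            throw new IllegalArgumentException("The function for a request cannot be null");
        }
        // computeIfAbsent is atomic: racing callers all observe the same queue.
        return queues.computeIfAbsent(function, f -> new ConcurrentLinkedQueue<>());
    }
}
```
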
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPCThrift.java b/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPCThrift.java
index 01d2392..f14f0ab 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPCThrift.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/drpc/DRPCThrift.java
@@ -24,36 +24,37 @@
 import org.apache.storm.generated.DistributedRPC;
 import org.apache.storm.generated.DistributedRPCInvocations;
 
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public class DRPCThrift implements DistributedRPC.Iface, DistributedRPCInvocations.Iface {
-    private final DRPC _drpc;
+    private final DRPC drpc;
 
     public DRPCThrift(DRPC drpc) {
-        _drpc = drpc;
+        this.drpc = drpc;
     }
 
     @Override
     public void result(String id, String result) throws AuthorizationException {
-        _drpc.returnResult(id, result);
+        drpc.returnResult(id, result);
     }
 
     @Override
     public DRPCRequest fetchRequest(String functionName) throws AuthorizationException {
-        return _drpc.fetchRequest(functionName);
+        return drpc.fetchRequest(functionName);
     }
 
     @Override
     public void failRequest(String id) throws AuthorizationException {
-        _drpc.failRequest(id, null);
+        drpc.failRequest(id, null);
     }
 
     @Override
     public void failRequestV2(String id, DRPCExecutionException e) throws AuthorizationException {
-        _drpc.failRequest(id, e);
+        drpc.failRequest(id, e);
     }
 
     @Override
     public String execute(String functionName, String funcArgs)
         throws DRPCExecutionException, AuthorizationException {
-        return _drpc.executeBlocking(functionName, funcArgs);
+        return drpc.executeBlocking(functionName, funcArgs);
     }
 }
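
`DRPCThrift` is a pure adapter: it exposes the core `DRPC` object through the two Thrift-generated interfaces and forwards every call unchanged. A compact sketch of that shape, using simplified stand-in types rather than Storm's Thrift API:

```java
// Simplified stand-ins, not Storm's Thrift API: an adapter exposes a core
// object through a remote-facing interface, forwarding every call unchanged.
interface RpcFace {
    String execute(String functionName, String funcArgs) throws Exception;
}

class Core {
    String executeBlocking(String functionName, String funcArgs) {
        return functionName + "(" + funcArgs + ")";
    }
}

class RpcAdapter implements RpcFace {
    private final Core core;

    RpcAdapter(Core core) {
        this.core = core;
    }

    @Override
    public String execute(String functionName, String funcArgs) {
        return core.executeBlocking(functionName, funcArgs); // pure pass-through
    }
}
```
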
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/drpc/OutstandingRequest.java b/storm-server/src/main/java/org/apache/storm/daemon/drpc/OutstandingRequest.java
index 06c596e..17a3985 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/drpc/OutstandingRequest.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/drpc/OutstandingRequest.java
@@ -23,35 +23,35 @@
 import org.apache.storm.utils.Time;
 
 public abstract class OutstandingRequest {
-    private final long _start;
-    private final String _function;
-    private final DRPCRequest _req;
-    private volatile boolean _fetched = false;
+    private final long start;
+    private final String function;
+    private final DRPCRequest req;
+    private volatile boolean fetched = false;
 
     public OutstandingRequest(String function, DRPCRequest req) {
-        _start = Time.currentTimeMillis();
-        _function = function;
-        _req = req;
+        start = Time.currentTimeMillis();
+        this.function = function;
+        this.req = req;
     }
 
     public DRPCRequest getRequest() {
-        return _req;
+        return req;
     }
 
     public void fetched() {
-        _fetched = true;
+        fetched = true;
     }
 
     public boolean wasFetched() {
-        return _fetched;
+        return fetched;
     }
 
     public String getFunction() {
-        return _function;
+        return function;
     }
 
     public boolean isTimedOut(long timeoutMs) {
-        return (_start + timeoutMs) <= Time.currentTimeMillis();
+        return (start + timeoutMs) <= Time.currentTimeMillis();
     }
 
     public abstract void returnResult(String result);
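
The timeout predicate above, `(start + timeoutMs) <= Time.currentTimeMillis()`, reads the clock through Storm's `Time` facade so tests can run against simulated time. The same predicate with the plain JDK clock, as an illustrative sketch:

```java
// Sketch with the plain JDK clock; Storm's version reads Time.currentTimeMillis()
// instead so unit tests can drive a simulated clock.
class Deadline {
    private final long startMs = System.currentTimeMillis();

    boolean isTimedOut(long timeoutMs) {
        // True once at least timeoutMs has elapsed since construction.
        return startMs + timeoutMs <= System.currentTimeMillis();
    }
}
```
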
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/drpc/RequestFactory.java b/storm-server/src/main/java/org/apache/storm/daemon/drpc/RequestFactory.java
index e6cd799..d847417 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/drpc/RequestFactory.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/drpc/RequestFactory.java
@@ -21,5 +21,6 @@
 import org.apache.storm.generated.DRPCRequest;
 
 public interface RequestFactory<T extends OutstandingRequest> {
-    public T mkRequest(String function, DRPCRequest req);
+
+    T mkRequest(String function, DRPCRequest req);
 }
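
Dropping the redundant `public` modifier is the checkstyle fix here; the interface remains a single-method (functional) interface, which is why `BlockingOutstandingRequest.FACTORY` above can be bound to a constructor reference. A simplified sketch of that binding, with stand-in types in place of `OutstandingRequest` and `DRPCRequest`:

```java
// Simplified stand-ins: a single-method interface bound to a constructor
// reference, as FACTORY = BlockingOutstandingRequest::new is above.
interface Factory<T> {
    T mkRequest(String function, String request);
}

class Request {
    final String function;
    final String request;

    Request(String function, String request) {
        this.function = function;
        this.request = request;
    }
}

class FactoryDemo {
    static final Factory<Request> FACTORY = Request::new;

    public static void main(String[] args) {
        Request r = FACTORY.mkRequest("add", "1 2");
        System.out.println(r.function + " -> " + r.request);
    }
}
```
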
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/HeartbeatCache.java b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/HeartbeatCache.java
index 4774260..6f8abc3 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/HeartbeatCache.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/HeartbeatCache.java
@@ -49,7 +49,7 @@
         private Integer nimbusTimeSecs;
         private Integer executorReportedTimeSecs;
 
-        public ExecutorCache(Map<String, Object> newBeat) {
+        ExecutorCache(Map<String, Object> newBeat) {
             if (newBeat != null) {
                 executorReportedTimeSecs = (Integer) newBeat.getOrDefault(ClientStatsUtil.TIME_SECS, 0);
             } else {
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/Nimbus.java b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/Nimbus.java
index 96d6c21..45add73 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/Nimbus.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/Nimbus.java
@@ -672,17 +672,17 @@
     @SuppressWarnings("deprecation")
     private static <T extends AutoCloseable> TimeCacheMap<String, T> makeBlobCacheMap(Map<String, Object> conf) {
         return new TimeCacheMap<>(ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_BLOBSTORE_EXPIRATION_SECS), 600),
-                                  (id, stream) -> {
-                                      try {
-                                          if (stream instanceof AtomicOutputStream) {
-                                              ((AtomicOutputStream) stream).cancel();
-                                          } else {
-                                              stream.close();
-                                          }
-                                      } catch (Exception e) {
-                                          throw new RuntimeException(e);
-                                      }
-                                  });
+            (id, stream) -> {
+                try {
+                    if (stream instanceof AtomicOutputStream) {
+                        ((AtomicOutputStream) stream).cancel();
+                    } else {
+                        stream.close();
+                    }
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            });
     }
 
     /**
@@ -1009,7 +1009,7 @@
      * ignored. The delay is to prevent race conditions, such as when a blobstore is created and when the topology
      * is submitted. It is possible the Nimbus cleanup timer task will find entries to delete between these two events.
      *
-     * Tracked topology entries are rotated out of the stored map periodically.
+     * <p>Tracked topology entries are rotated out of the stored map periodically.
      *
      * @param toposToClean topologies considered for cleanup
      * @param conf the nimbus conf
@@ -1168,7 +1168,7 @@
     private static void validateTopologySize(Map<String, Object> topoConf, Map<String, Object> nimbusConf,
         StormTopology topology) throws InvalidTopologyException {
         // check allowedWorkers only if the scheduler is not the Resource Aware Scheduler
-        if (!ServerUtils.isRAS(nimbusConf)) {
+        if (!ServerUtils.isRas(nimbusConf)) {
             int workerCount = ObjectReader.getInt(topoConf.get(Config.TOPOLOGY_WORKERS), 1);
             Integer allowedWorkers = ObjectReader.getInt(nimbusConf.get(DaemonConfig.NIMBUS_SLOTS_PER_TOPOLOGY), null);
             if (allowedWorkers != null && workerCount > allowedWorkers) {
@@ -1245,8 +1245,6 @@
     private static Map<IClusterMetricsConsumer.SupervisorInfo, List<DataPoint>> extractSupervisorMetrics(ClusterSummary summ) {
         Map<IClusterMetricsConsumer.SupervisorInfo, List<DataPoint>> ret = new HashMap<>();
         for (SupervisorSummary sup : summ.get_supervisors()) {
-            IClusterMetricsConsumer.SupervisorInfo info =
-                new IClusterMetricsConsumer.SupervisorInfo(sup.get_host(), sup.get_supervisor_id(), Time.currentTimeSecs());
             List<DataPoint> metrics = new ArrayList<>();
             metrics.add(new DataPoint("slotsTotal", sup.get_num_workers()));
             metrics.add(new DataPoint("slotsUsed", sup.get_num_used_workers()));
@@ -1254,6 +1252,8 @@
             metrics.add(new DataPoint("totalCpu", sup.get_total_resources().get(Constants.COMMON_CPU_RESOURCE_NAME)));
             metrics.add(new DataPoint("usedMem", sup.get_used_mem()));
             metrics.add(new DataPoint("usedCpu", sup.get_used_cpu()));
+            IClusterMetricsConsumer.SupervisorInfo info =
+                    new IClusterMetricsConsumer.SupervisorInfo(sup.get_host(), sup.get_supervisor_id(), Time.currentTimeSecs());
             ret.put(info, metrics);
         }
         return ret;
@@ -1277,6 +1277,132 @@
         }
     }
 
+    @VisibleForTesting
+    public void launchServer() throws Exception {
+        try {
+            IStormClusterState state = stormClusterState;
+            NimbusInfo hpi = nimbusHostPortInfo;
+
+            LOG.info("Starting Nimbus with conf {}", ConfigUtils.maskPasswords(conf));
+            validator.prepare(conf);
+
+            //add to nimbuses
+            state.addNimbusHost(hpi.getHost(),
+                    new NimbusSummary(hpi.getHost(), hpi.getPort(), Time.currentTimeSecs(), false, STORM_VERSION));
+            leaderElector.addToLeaderLockQueue();
+            this.blobStore.startSyncBlobs();
+
+            for (ClusterMetricsConsumerExecutor exec: clusterConsumerExceutors) {
+                exec.prepare();
+            }
+
+            if (isLeader()) {
+                for (String topoId : state.activeStorms()) {
+                    transition(topoId, TopologyActions.STARTUP, null);
+                }
+                clusterMetricSet.setActive(true);
+            }
+
+            final boolean doNotReassign = (Boolean) conf.getOrDefault(ServerConfigUtils.NIMBUS_DO_NOT_REASSIGN, false);
+            timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_MONITOR_FREQ_SECS)),
+                () -> {
+                    try {
+                        if (!doNotReassign) {
+                            mkAssignments();
+                        }
+                        doCleanup();
+                    } catch (Exception e) {
+                        throw new RuntimeException(e);
+                    }
+                });
+
+            // Schedule Nimbus inbox cleaner
+            final int jarExpSecs = ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_INBOX_JAR_EXPIRATION_SECS));
+            timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CLEANUP_INBOX_FREQ_SECS)),
+                () -> {
+                    try {
+                        cleanInbox(getInbox(), jarExpSecs);
+                    } catch (Exception e) {
+                        throw new RuntimeException(e);
+                    }
+                });
+
+
+            // Schedule topology history cleaner
+            Integer interval = ObjectReader.getInt(conf.get(DaemonConfig.LOGVIEWER_CLEANUP_INTERVAL_SECS), null);
+            if (interval != null) {
+                final int lvCleanupAgeMins = ObjectReader.getInt(conf.get(DaemonConfig.LOGVIEWER_CLEANUP_AGE_MINS));
+                timer.scheduleRecurring(0, interval,
+                    () -> {
+                        try {
+                            cleanTopologyHistory(lvCleanupAgeMins);
+                        } catch (Exception e) {
+                            throw new RuntimeException(e);
+                        }
+                    });
+            }
+
+            timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CREDENTIAL_RENEW_FREQ_SECS)),
+                () -> {
+                    try {
+                        renewCredentials();
+                    } catch (Exception e) {
+                        throw new RuntimeException(e);
+                    }
+                });
+
+            metricsRegistry.registerGauge("nimbus:total-available-memory-non-negative", () -> nodeIdToResources.get().values()
+                    .parallelStream()
+                    .mapToDouble(supervisorResources -> Math.max(supervisorResources.getAvailableMem(), 0))
+                    .sum());
+            metricsRegistry.registerGauge("nimbus:available-cpu-non-negative", () -> nodeIdToResources.get().values()
+                    .parallelStream()
+                    .mapToDouble(supervisorResources -> Math.max(supervisorResources.getAvailableCpu(), 0))
+                    .sum());
+            metricsRegistry.registerGauge("nimbus:total-memory", () -> nodeIdToResources.get().values()
+                    .parallelStream()
+                    .mapToDouble(SupervisorResources::getTotalMem)
+                    .sum());
+            metricsRegistry.registerGauge("nimbus:total-cpu", () -> nodeIdToResources.get().values()
+                    .parallelStream()
+                    .mapToDouble(SupervisorResources::getTotalCpu)
+                    .sum());
+            metricsRegistry.registerGauge("nimbus:longest-scheduling-time-ms", () -> {
+                //We want to update the longest scheduling time in real time in case the scheduler gets stuck
+                // Get current time before startTime to avoid potential race with scheduler's Timer
+                Long currTime = Time.nanoTime();
+                Long startTime = schedulingStartTimeNs.get();
+                return TimeUnit.NANOSECONDS.toMillis(startTime == null
+                        ? longestSchedulingTime.get()
+                        : Math.max(currTime - startTime, longestSchedulingTime.get()));
+            });
+            metricsRegistry.registerMeter("nimbus:num-launched").mark();
+
+            timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.STORM_CLUSTER_METRICS_CONSUMER_PUBLISH_INTERVAL_SECS)),
+                () -> {
+                    try {
+                        if (isLeader()) {
+                            sendClusterMetricsToExecutors();
+                        }
+                    } catch (Exception e) {
+                        throw new RuntimeException(e);
+                    }
+                });
+
+            timer.scheduleRecurring(5, 5, clusterMetricSet);
+        } catch (Exception e) {
+            if (Utils.exceptionCauseIsInstanceOf(InterruptedException.class, e)) {
+                throw e;
+            }
+
+            if (Utils.exceptionCauseIsInstanceOf(InterruptedIOException.class, e)) {
+                throw e;
+            }
+            LOG.error("Error on initialization of nimbus", e);
+            Utils.exitProcess(13, "Error on initialization of nimbus");
+        }
+    }
+
     private static Nimbus launchServer(Map<String, Object> conf, INimbus inimbus) throws Exception {
         StormCommon.validateDistributedMode(conf);
         validatePortAvailable(conf);
@@ -1314,6 +1440,7 @@
         launch(new StandaloneINimbus());
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     private static CuratorFramework makeZKClient(Map<String, Object> conf) {
         List<String> servers = (List<String>) conf.get(Config.STORM_ZOOKEEPER_SERVERS);
         Object port = conf.get(Config.STORM_ZOOKEEPER_PORT);
@@ -1431,10 +1558,10 @@
 
     @VisibleForTesting
     static void validateTopologyWorkerMaxHeapSizeConfigs(
-        Map<String, Object> stormConf, StormTopology topology, double defaultWorkerMaxHeapSizeMB) {
+        Map<String, Object> stormConf, StormTopology topology, double defaultWorkerMaxHeapSizeMb) {
         double largestMemReq = getMaxExecutorMemoryUsageForTopo(topology, stormConf);
         double topologyWorkerMaxHeapSize =
-            ObjectReader.getDouble(stormConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB), defaultWorkerMaxHeapSizeMB);
+            ObjectReader.getDouble(stormConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB), defaultWorkerMaxHeapSizeMb);
         if (topologyWorkerMaxHeapSize < largestMemReq) {
             throw new IllegalArgumentException(
                 "Topology will not be able to be successfully scheduled: Config "
@@ -2013,10 +2140,10 @@
     private boolean isFragmented(SupervisorResources supervisorResources) {
         double minMemory = ObjectReader.getDouble(conf.get(Config.TOPOLOGY_COMPONENT_RESOURCES_ONHEAP_MEMORY_MB), 256.0)
                            + ObjectReader.getDouble(conf.get(Config.TOPOLOGY_ACKER_RESOURCES_ONHEAP_MEMORY_MB), 128.0);
-        double minCPU = ObjectReader.getDouble(conf.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT), 50.0)
+        double minCpu = ObjectReader.getDouble(conf.get(Config.TOPOLOGY_COMPONENT_CPU_PCORE_PERCENT), 50.0)
                         + ObjectReader.getDouble(conf.get(Config.TOPOLOGY_ACKER_CPU_PCORE_PERCENT), 50.0);
 
-        return minMemory > supervisorResources.getAvailableMem() || minCPU > supervisorResources.getAvailableCpu();
+        return minMemory > supervisorResources.getAvailableMem() || minCpu > supervisorResources.getAvailableCpu();
     }
 
     private double fragmentedMemory() {
@@ -2148,6 +2275,7 @@
         return ret;
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     private boolean isReadyForMKAssignments() throws Exception {
         if (isLeader()) {
             if (isHeartbeatsRecovered()) {
@@ -2384,7 +2512,6 @@
                                String principal, Map<String, Object> topoConf, StormTopology stormTopology)
         throws KeyNotFoundException, AuthorizationException, IOException, InvalidTopologyException {
         assert (TopologyStatus.ACTIVE == initStatus || TopologyStatus.INACTIVE == initStatus);
-        IStormClusterState state = stormClusterState;
         Map<String, Integer> numExecutors = new HashMap<>();
         StormTopology topology = StormCommon.systemTopology(topoConf, stormTopology);
         for (Entry<String, Object> entry : StormCommon.allComponents(topology).entrySet()) {
@@ -2403,6 +2530,7 @@
         base.set_owner(owner);
         base.set_principal(principal);
         base.set_component_debug(new HashMap<>());
+        IStormClusterState state = stormClusterState;
         state.activateStorm(topoId, base, topoConf);
         idToExecutors.getAndUpdate(new Assoc<>(topoId,
             new HashSet<>(computeExecutors(topoId, base, topoConf, stormTopology))));
@@ -2434,8 +2562,7 @@
 
     @VisibleForTesting
     public void checkAuthorization(String topoName, Map<String, Object> topoConf, String operation, ReqContext context)
-        throws AuthorizationException {
-        IAuthorizer aclHandler = authorizationHandler;
+            throws AuthorizationException {
         IAuthorizer impersonationAuthorizer = impersonationAuthorizationHandler;
         if (context == null) {
             context = ReqContext.context();
@@ -2465,6 +2592,7 @@
             }
         }
 
+        IAuthorizer aclHandler = authorizationHandler;
         if (aclHandler != null) {
             if (!aclHandler.permit(context, operation, checkConf)) {
                 ThriftAccessLogger.logAccess(context.requestID(), context.remoteAddress(), context.principal(), operation,
@@ -2668,6 +2796,12 @@
     }
 
     private SupervisorSummary makeSupervisorSummary(String supervisorId, SupervisorInfo info) {
+        Set<String> blacklistedSupervisorIds = Collections.emptySet();
+        if (scheduler instanceof BlacklistScheduler) {
+            BlacklistScheduler bs = (BlacklistScheduler) scheduler;
+            blacklistedSupervisorIds = bs.getBlacklistSupervisorIds();
+        }
+
         LOG.debug("INFO: {} ID: {}", info, supervisorId);
         int numPorts = 0;
         if (info.is_set_meta()) {
@@ -2701,6 +2835,13 @@
         if (info.is_set_version()) {
             ret.set_version(info.get_version());
         }
+
+        if (blacklistedSupervisorIds.contains(supervisorId)) {
+            ret.set_blacklisted(true);
+        } else {
+            ret.set_blacklisted(false);
+        }
+
         return ret;
     }
 
@@ -2808,8 +2949,7 @@
     }
 
     private CommonTopoInfo getCommonTopoInfo(String topoId, String operation) throws NotAliveException,
-        AuthorizationException, IOException, InvalidTopologyException {
-        IStormClusterState state = stormClusterState;
+            AuthorizationException, IOException, InvalidTopologyException {
         CommonTopoInfo ret = new CommonTopoInfo();
         ret.topoConf = tryReadTopoConf(topoId, topoCache);
         ret.topoName = (String) ret.topoConf.get(Config.TOPOLOGY_NAME);
@@ -2817,6 +2957,7 @@
         StormTopology topology = tryReadTopology(topoId, topoCache);
         ret.topology = StormCommon.systemTopology(ret.topoConf, topology);
         ret.taskToComponent = StormCommon.stormTaskInfo(topology, ret.topoConf);
+        IStormClusterState state = stormClusterState;
         ret.base = state.stormBase(topoId, null);
         if (ret.base != null && ret.base.is_set_launch_time_secs()) {
             ret.launchTimeSecs = ret.base.get_launch_time_secs();
@@ -2833,131 +2974,6 @@
         ret.allComponents = new HashSet<>(ret.taskToComponent.values());
         return ret;
     }
-
-    @VisibleForTesting
-    public void launchServer() throws Exception {
-        try {
-            IStormClusterState state = stormClusterState;
-            NimbusInfo hpi = nimbusHostPortInfo;
-
-            LOG.info("Starting Nimbus with conf {}", ConfigUtils.maskPasswords(conf));
-            validator.prepare(conf);
-
-            //add to nimbuses
-            state.addNimbusHost(hpi.getHost(),
-                                new NimbusSummary(hpi.getHost(), hpi.getPort(), Time.currentTimeSecs(), false, STORM_VERSION));
-            leaderElector.addToLeaderLockQueue();
-            this.blobStore.startSyncBlobs();
-            
-            for (ClusterMetricsConsumerExecutor exec: clusterConsumerExceutors) {
-                exec.prepare();
-            }
-
-            if (isLeader()) {
-                for (String topoId : state.activeStorms()) {
-                    transition(topoId, TopologyActions.STARTUP, null);
-                }
-                clusterMetricSet.setActive(true);
-            }
-
-            final boolean doNotReassign = (Boolean) conf.getOrDefault(ServerConfigUtils.NIMBUS_DO_NOT_REASSIGN, false);
-            timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_MONITOR_FREQ_SECS)),
-                                    () -> {
-                                        try {
-                                            if (!doNotReassign) {
-                                                mkAssignments();
-                                            }
-                                            doCleanup();
-                                        } catch (Exception e) {
-                                            throw new RuntimeException(e);
-                                        }
-                                    });
-
-            // Schedule Nimbus inbox cleaner
-            final int jarExpSecs = ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_INBOX_JAR_EXPIRATION_SECS));
-            timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CLEANUP_INBOX_FREQ_SECS)),
-                    () -> {
-                        try {
-                            cleanInbox(getInbox(), jarExpSecs);
-                        } catch (Exception e) {
-                            throw new RuntimeException(e);
-                        }
-                    });
-            
-
-            // Schedule topology history cleaner
-            Integer interval = ObjectReader.getInt(conf.get(DaemonConfig.LOGVIEWER_CLEANUP_INTERVAL_SECS), null);
-            if (interval != null) {
-                final int lvCleanupAgeMins = ObjectReader.getInt(conf.get(DaemonConfig.LOGVIEWER_CLEANUP_AGE_MINS));
-                timer.scheduleRecurring(0, interval,
-                                        () -> {
-                                            try {
-                                                cleanTopologyHistory(lvCleanupAgeMins);
-                                            } catch (Exception e) {
-                                                throw new RuntimeException(e);
-                                            }
-                                        });
-            }
-
-            timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CREDENTIAL_RENEW_FREQ_SECS)),
-                                    () -> {
-                                        try {
-                                            renewCredentials();
-                                        } catch (Exception e) {
-                                            throw new RuntimeException(e);
-                                        }
-                                    });
-
-            metricsRegistry.registerGauge("nimbus:total-available-memory-non-negative", () -> nodeIdToResources.get().values()
-                .parallelStream()
-                .mapToDouble(supervisorResources -> Math.max(supervisorResources.getAvailableMem(), 0))
-                .sum());
-            metricsRegistry.registerGauge("nimbus:available-cpu-non-negative", () -> nodeIdToResources.get().values()
-                .parallelStream()
-                .mapToDouble(supervisorResources -> Math.max(supervisorResources.getAvailableCpu(), 0))
-                .sum());
-            metricsRegistry.registerGauge("nimbus:total-memory", () -> nodeIdToResources.get().values()
-                .parallelStream()
-                .mapToDouble(SupervisorResources::getTotalMem)
-                .sum());
-            metricsRegistry.registerGauge("nimbus:total-cpu", () -> nodeIdToResources.get().values()
-                .parallelStream()
-                .mapToDouble(SupervisorResources::getTotalCpu)
-                .sum());
-            metricsRegistry.registerGauge("nimbus:longest-scheduling-time-ms", () -> {
-                //We want to update longest scheduling time in real time in case scheduler get stuck
-                // Get current time before startTime to avoid potential race with scheduler's Timer
-                Long currTime = Time.nanoTime();
-                Long startTime = schedulingStartTimeNs.get();
-                return TimeUnit.NANOSECONDS.toMillis(startTime == null ?
-                        longestSchedulingTime.get() : Math.max(currTime - startTime, longestSchedulingTime.get()));
-            });
-            metricsRegistry.registerMeter("nimbus:num-launched").mark();
-
-            timer.scheduleRecurring(0, ObjectReader.getInt(conf.get(DaemonConfig.STORM_CLUSTER_METRICS_CONSUMER_PUBLISH_INTERVAL_SECS)),
-                                    () -> {
-                                        try {
-                                            if (isLeader()) {
-                                                sendClusterMetricsToExecutors();
-                                            }
-                                        } catch (Exception e) {
-                                            throw new RuntimeException(e);
-                                        }
-                                    });
-            
-            timer.scheduleRecurring(5, 5, clusterMetricSet);
-        } catch (Exception e) {
-            if (Utils.exceptionCauseIsInstanceOf(InterruptedException.class, e)) {
-                throw e;
-            }
-
-            if (Utils.exceptionCauseIsInstanceOf(InterruptedIOException.class, e)) {
-                throw e;
-            }
-            LOG.error("Error on initialization of nimbus", e);
-            Utils.exitProcess(13, "Error on initialization of nimbus");
-        }
-    }
     
     @VisibleForTesting
     public boolean awaitLeadership(long timeout, TimeUnit timeUnit) throws InterruptedException {
@@ -3022,15 +3038,15 @@
             ReqContext req = ReqContext.context();
             Principal principal = req.principal();
             String submitterPrincipal = principal == null ? null : principal.toString();
-            String submitterUser = principalToLocal.toLocal(principal);
-            String systemUser = System.getProperty("user.name");
             @SuppressWarnings("unchecked")
             Set<String> topoAcl = new HashSet<>((List<String>) topoConf.getOrDefault(Config.TOPOLOGY_USERS, Collections.emptyList()));
             topoAcl.add(submitterPrincipal);
+            String submitterUser = principalToLocal.toLocal(principal);
             topoAcl.add(submitterUser);
 
             String topologyPrincipal = Utils.OR(submitterPrincipal, "");
             topoConf.put(Config.TOPOLOGY_SUBMITTER_PRINCIPAL, topologyPrincipal);
+            String systemUser = System.getProperty("user.name");
             String topologyOwner = Utils.OR(submitterUser, systemUser);
             topoConf.put(Config.TOPOLOGY_SUBMITTER_USER, topologyOwner); //Don't let the user set who we launch as
             topoConf.put(Config.TOPOLOGY_USERS, new ArrayList<>(topoAcl));
@@ -3065,8 +3081,8 @@
 
             // if the Resource Aware Scheduler is used,
             // we might need to set the number of acker executors and eventlogger executors to be the estimated number of workers.
-            if (ServerUtils.isRAS(conf)) {
-                int estimatedNumWorker = ServerUtils.getEstimatedWorkerCountForRASTopo(totalConf, topology);
+            if (ServerUtils.isRas(conf)) {
+                int estimatedNumWorker = ServerUtils.getEstimatedWorkerCountForRasTopo(totalConf, topology);
                 int numAckerExecs = ObjectReader.getInt(totalConf.get(Config.TOPOLOGY_ACKER_EXECUTORS), estimatedNumWorker);
                 int numEventLoggerExecs = ObjectReader.getInt(totalConf.get(Config.TOPOLOGY_EVENTLOGGER_EXECUTORS), estimatedNumWorker);
 
@@ -3361,7 +3377,6 @@
             if (topoId == null) {
                 throw new WrappedNotAliveException(topoName);
             }
-            boolean hasCompId = componentId != null && !componentId.isEmpty();
 
             DebugOptions options = new DebugOptions();
             options.set_enable(enable);
@@ -3371,6 +3386,7 @@
             StormBase updates = new StormBase();
             //For backwards compatability
             updates.set_component_executors(Collections.emptyMap());
+            boolean hasCompId = componentId != null && !componentId.isEmpty();
             String key = hasCompId ? componentId : topoId;
             updates.put_to_component_debug(key, options);
 
@@ -4018,12 +4034,10 @@
             CommonTopoInfo common = getCommonTopoInfo(topoId, "getTopologyPageInfo");
             String topoName = common.topoName;
             IStormClusterState state = stormClusterState;
-            int launchTimeSecs = common.launchTimeSecs;
             Assignment assignment = common.assignment;
             Map<List<Integer>, Map<String, Object>> beats = common.beats;
             Map<Integer, String> taskToComp = common.taskToComponent;
             StormTopology topology = common.topology;
-            Map<String, Object> topoConf = Utils.merge(conf, common.topoConf);
             StormBase base = common.base;
             if (base == null) {
                 throw new WrappedNotAliveException(topoId);
@@ -4067,6 +4081,7 @@
                 topoPageInfo.set_storm_version(topology.get_storm_version());
             }
 
+            Map<String, Object> topoConf = Utils.merge(conf, common.topoConf);
             Map<String, NormalizedResourceRequest> spoutResources = ResourceUtils.getSpoutsResources(topology, topoConf);
             for (Entry<String, ComponentAggregateStats> entry : topoPageInfo.get_id_to_spout_agg_stats().entrySet()) {
                 CommonAggregateStats commonStats = entry.getValue().get_common_stats();
@@ -4115,6 +4130,7 @@
                 topoPageInfo.set_assigned_shared_on_heap_memory(resources.getAssignedSharedMemOnHeap());
                 topoPageInfo.set_assigned_regular_on_heap_memory(resources.getAssignedNonSharedMemOnHeap());
             }
+            int launchTimeSecs = common.launchTimeSecs;
             topoPageInfo.set_name(topoName);
             topoPageInfo.set_status(extractStatusStr(base));
             topoPageInfo.set_uptime_secs(Time.deltaSecs(launchTimeSecs));
@@ -4777,7 +4793,7 @@
         private final K key;
         private final V value;
 
-        public Assoc(K key, V value) {
+        Assoc(K key, V value) {
             this.key = key;
             this.value = value;
         }
@@ -4795,7 +4811,7 @@
     private static final class Dissoc<K, V> implements UnaryOperator<Map<K, V>> {
         private final K key;
 
-        public Dissoc(K key) {
+        Dissoc(K key) {
             this.key = key;
         }
 
@@ -4955,66 +4971,74 @@
                 }
             };
 
-            clusterSummaryMetrics.put("cluster:num-nimbus-leaders", new DerivativeGauge<ClusterSummary, Long>(cachedSummary) {
-                @Override
-                protected Long transform(ClusterSummary clusterSummary) {
-                    return clusterSummary.get_nimbuses().stream()
-                            .filter(NimbusSummary::is_isLeader)
-                            .count();
-                }
-            });
-            clusterSummaryMetrics.put("cluster:num-nimbuses", new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
-                @Override
-                protected Integer transform(ClusterSummary clusterSummary) {
-                    return clusterSummary.get_nimbuses_size();
-                }
-            });
-            clusterSummaryMetrics.put("cluster:num-supervisors", new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
-                @Override
-                protected Integer transform(ClusterSummary clusterSummary) {
-                    return clusterSummary.get_supervisors_size();
-                }
-            });
-            clusterSummaryMetrics.put("cluster:num-topologies", new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
-                @Override
-                protected Integer transform(ClusterSummary clusterSummary) {
-                    return clusterSummary.get_topologies_size();
-                }
-            });
-            clusterSummaryMetrics.put("cluster:num-total-workers", new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
-                @Override
-                protected Integer transform(ClusterSummary clusterSummary) {
-                    return clusterSummary.get_supervisors().stream()
-                            .mapToInt(SupervisorSummary::get_num_workers)
-                            .sum();
-                }
-            });
-            clusterSummaryMetrics.put("cluster:num-total-used-workers", new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
-                @Override
-                protected Integer transform(ClusterSummary clusterSummary) {
-                    return clusterSummary.get_supervisors().stream()
-                            .mapToInt(SupervisorSummary::get_num_used_workers)
-                            .sum();
-                }
-            });
-            clusterSummaryMetrics.put("cluster:total-fragmented-memory-non-negative", new DerivativeGauge<ClusterSummary, Double>(cachedSummary) {
-                @Override
-                protected Double transform(ClusterSummary clusterSummary) {
-                    return clusterSummary.get_supervisors().stream()
-                            //Filtered negative value
-                            .mapToDouble(supervisorSummary -> Math.max(supervisorSummary.get_fragmented_mem(), 0))
-                            .sum();
-                }
-            });
-            clusterSummaryMetrics.put("cluster:total-fragmented-cpu-non-negative", new DerivativeGauge<ClusterSummary, Double>(cachedSummary) {
-                @Override
-                protected Double transform(ClusterSummary clusterSummary) {
-                    return clusterSummary.get_supervisors().stream()
-                            //Filtered negative value
-                            .mapToDouble(supervisorSummary -> Math.max(supervisorSummary.get_fragmented_cpu(), 0))
-                            .sum();
-                }
-            });
+            clusterSummaryMetrics.put("cluster:num-nimbus-leaders",
+                    new DerivativeGauge<ClusterSummary, Long>(cachedSummary) {
+                        @Override
+                        protected Long transform(ClusterSummary clusterSummary) {
+                            return clusterSummary.get_nimbuses().stream()
+                                    .filter(NimbusSummary::is_isLeader)
+                                    .count();
+                        }
+                    });
+            clusterSummaryMetrics.put("cluster:num-nimbuses",
+                    new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
+                        @Override
+                        protected Integer transform(ClusterSummary clusterSummary) {
+                            return clusterSummary.get_nimbuses_size();
+                        }
+                    });
+            clusterSummaryMetrics.put("cluster:num-supervisors",
+                    new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
+                        @Override
+                        protected Integer transform(ClusterSummary clusterSummary) {
+                            return clusterSummary.get_supervisors_size();
+                        }
+                    });
+            clusterSummaryMetrics.put("cluster:num-topologies",
+                    new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
+                        @Override
+                        protected Integer transform(ClusterSummary clusterSummary) {
+                            return clusterSummary.get_topologies_size();
+                        }
+                    });
+            clusterSummaryMetrics.put("cluster:num-total-workers",
+                    new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
+                        @Override
+                        protected Integer transform(ClusterSummary clusterSummary) {
+                            return clusterSummary.get_supervisors().stream()
+                                    .mapToInt(SupervisorSummary::get_num_workers)
+                                    .sum();
+                        }
+                    });
+            clusterSummaryMetrics.put("cluster:num-total-used-workers",
+                    new DerivativeGauge<ClusterSummary, Integer>(cachedSummary) {
+                        @Override
+                        protected Integer transform(ClusterSummary clusterSummary) {
+                            return clusterSummary.get_supervisors().stream()
+                                    .mapToInt(SupervisorSummary::get_num_used_workers)
+                                    .sum();
+                        }
+                    });
+            clusterSummaryMetrics.put("cluster:total-fragmented-memory-non-negative",
+                    new DerivativeGauge<ClusterSummary, Double>(cachedSummary) {
+                        @Override
+                        protected Double transform(ClusterSummary clusterSummary) {
+                            return clusterSummary.get_supervisors().stream()
+                                    //Filtered negative value
+                                    .mapToDouble(supervisorSummary -> Math.max(supervisorSummary.get_fragmented_mem(), 0))
+                                    .sum();
+                        }
+                    });
+            clusterSummaryMetrics.put("cluster:total-fragmented-cpu-non-negative",
+                    new DerivativeGauge<ClusterSummary, Double>(cachedSummary) {
+                        @Override
+                        protected Double transform(ClusterSummary clusterSummary) {
+                            return clusterSummary.get_supervisors().stream()
+                                    //Filtered negative value
+                                    .mapToDouble(supervisorSummary -> Math.max(supervisorSummary.get_fragmented_cpu(), 0))
+                                    .sum();
+                        }
+                    });
         }
 
         private void updateHistogram(ClusterSummary newSummary) {
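
The reflowed registrations above all follow one pattern from the Codahale metrics library: a single (potentially expensive) `ClusterSummary` snapshot is produced by a cached gauge, and each reported number is a `DerivativeGauge` that merely projects a field out of that snapshot. A minimal sketch of the pattern, with an illustrative `Summary` type standing in for `ClusterSummary`:

```java
import com.codahale.metrics.CachedGauge;
import com.codahale.metrics.DerivativeGauge;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;

// Sketch: one cached snapshot, many cheap derived views of it.
class Summary {
    final List<Integer> workersPerSupervisor;

    Summary(List<Integer> workersPerSupervisor) {
        this.workersPerSupervisor = workersPerSupervisor;
    }
}

class ClusterGauges {
    // Recomputed at most once every 5 seconds, however many gauges read it.
    final CachedGauge<Summary> cached = new CachedGauge<Summary>(5, TimeUnit.SECONDS) {
        @Override
        protected Summary loadValue() {
            return new Summary(Arrays.asList(4, 4, 2)); // stand-in for a real snapshot
        }
    };

    // Derived view: transform() runs against the cached snapshot.
    final DerivativeGauge<Summary, Integer> totalWorkers =
            new DerivativeGauge<Summary, Integer>(cached) {
                @Override
                protected Integer transform(Summary s) {
                    return s.workersPerSupervisor.stream().mapToInt(Integer::intValue).sum();
                }
            };
}
```
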
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopoCache.java b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopoCache.java
index 0b0e70c..a6e0332 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopoCache.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopoCache.java
@@ -12,6 +12,8 @@
 
 package org.apache.storm.daemon.nimbus;
 
+import static org.apache.storm.blobstore.BlobStoreAclHandler.READ;
+
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
@@ -31,8 +33,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.storm.blobstore.BlobStoreAclHandler.READ;
-
 /**
  * Cache topologies and topology confs from the blob store.
  * Makes reading this faster because it can skip
@@ -44,6 +44,7 @@
     private final BlobStoreAclHandler aclHandler;
     private final ConcurrentHashMap<String, WithAcl<StormTopology>> topos = new ConcurrentHashMap<>();
     private final ConcurrentHashMap<String, WithAcl<Map<String, Object>>> confs = new ConcurrentHashMap<>();
+
     public TopoCache(BlobStore store, Map<String, Object> conf) {
         this.store = store;
         aclHandler = new BlobStoreAclHandler(conf);
@@ -228,7 +229,7 @@
         public final List<AccessControl> acl;
         public final T data;
 
-        public WithAcl(List<AccessControl> acl, T data) {
+        WithAcl(List<AccessControl> acl, T data) {
             this.acl = acl;
             this.data = data;
         }
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyActions.java b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyActions.java
index bc41da5..05f9996 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyActions.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyActions.java
@@ -13,7 +13,7 @@
 package org.apache.storm.daemon.nimbus;
 
 /**
- * Actions that can be done to a topology in nimbus
+ * Actions that can be done to a topology in nimbus.
  */
 public enum TopologyActions {
     STARTUP,
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyResources.java b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyResources.java
index bb932ed..f0db842 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyResources.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyResources.java
@@ -36,8 +36,9 @@
     private double assignedNonSharedMemOnHeap;
     private double assignedNonSharedMemOffHeap;
     private double assignedCpu;
+
     private TopologyResources(TopologyDetails td, Collection<WorkerResources> workers,
-                              Map<String, Double> sharedOffHeap) {
+                              Map<String, Double> nodeIdToSharedOffHeapNode) {
         requestedMemOnHeap = td.getTotalRequestedMemOnHeap();
         requestedMemOffHeap = td.getTotalRequestedMemOffHeap();
         requestedSharedMemOnHeap = td.getRequestedSharedOnHeap();
@@ -73,18 +74,21 @@
             }
         }
 
-        if (sharedOffHeap != null) {
-            double sharedOff = sharedOffHeap.values().stream().reduce(0.0, (sum, val) -> sum + val);
+        if (nodeIdToSharedOffHeapNode != null) {
+            double sharedOff = nodeIdToSharedOffHeapNode.values().stream().reduce(0.0, (sum, val) -> sum + val);
             assignedSharedMemOffHeap += sharedOff;
             assignedMemOffHeap += sharedOff;
         }
     }
+
     public TopologyResources(TopologyDetails td, SchedulerAssignment assignment) {
-        this(td, getWorkerResources(assignment), getNodeIdToSharedOffHeap(assignment));
+        this(td, getWorkerResources(assignment), getNodeIdToSharedOffHeapNode(assignment));
     }
+
     public TopologyResources(TopologyDetails td, Assignment assignment) {
-        this(td, getWorkerResources(assignment), getNodeIdToSharedOffHeap(assignment));
+        this(td, getWorkerResources(assignment), getNodeIdToSharedOffHeapNode(assignment));
     }
+
     public TopologyResources() {
         this(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
     }
@@ -142,15 +146,15 @@
         return ret;
     }
 
-    private static Map<String, Double> getNodeIdToSharedOffHeap(SchedulerAssignment assignment) {
+    private static Map<String, Double> getNodeIdToSharedOffHeapNode(SchedulerAssignment assignment) {
         Map<String, Double> ret = null;
         if (assignment != null) {
-            ret = assignment.getNodeIdToTotalSharedOffHeapMemory();
+            ret = assignment.getNodeIdToTotalSharedOffHeapNodeMemory();
         }
         return ret;
     }
 
-    private static Map<String, Double> getNodeIdToSharedOffHeap(Assignment assignment) {
+    private static Map<String, Double> getNodeIdToSharedOffHeapNode(Assignment assignment) {
         Map<String, Double> ret = null;
         if (assignment != null) {
             ret = assignment.get_total_shared_off_heap();
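
The rename makes it explicit that these maps are keyed by node id. The summation in the constructor, `values().stream().reduce(0.0, (sum, val) -> sum + val)`, is the usual way to total a `Map<String, Double>`; an equivalent form using `mapToDouble`, as an illustrative sketch:

```java
import java.util.Map;

// Sketch only: an equivalent spelling of the shared off-heap summation above.
class SharedOffHeap {
    static double total(Map<String, Double> nodeIdToSharedOffHeapNode) {
        if (nodeIdToSharedOffHeapNode == null) {
            return 0.0;
        }
        return nodeIdToSharedOffHeapNode.values().stream()
                .mapToDouble(Double::doubleValue)
                .sum();
    }
}
```
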
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyStateTransition.java b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyStateTransition.java
index 37019b9..5308df6 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyStateTransition.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/nimbus/TopologyStateTransition.java
@@ -15,8 +15,9 @@
 import org.apache.storm.generated.StormBase;
 
 /**
- * A transition from one state to another
+ * A transition from one state to another.
  */
 interface TopologyStateTransition {
+
     StormBase transition(Object argument, Nimbus nimbus, String topoId, StormBase base) throws Exception;
 }
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainer.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainer.java
index 1e61be6..153ac77 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainer.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainer.java
@@ -18,8 +18,10 @@
 
 package org.apache.storm.daemon.supervisor;
 
+import static org.apache.storm.daemon.nimbus.Nimbus.MIN_VERSION_SUPPORT_RPC_HEARTBEAT;
+import static org.apache.storm.utils.Utils.OR;
+
 import java.io.File;
-import java.io.FilenameFilter;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -41,6 +43,7 @@
 import org.apache.storm.generated.ProfileRequest;
 import org.apache.storm.generated.StormTopology;
 import org.apache.storm.generated.WorkerResources;
+import org.apache.storm.metric.StormMetricsRegistry;
 import org.apache.storm.shade.com.google.common.base.Joiner;
 import org.apache.storm.shade.com.google.common.collect.Lists;
 import org.apache.storm.utils.ConfigUtils;
@@ -55,30 +58,24 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.storm.daemon.nimbus.Nimbus.MIN_VERSION_SUPPORT_RPC_HEARTBEAT;
-import static org.apache.storm.utils.Utils.OR;
-
-import org.apache.storm.metric.StormMetricsRegistry;
 
 /**
  * A container that runs processes on the local box.
  */
 public class BasicContainer extends Container {
-    static final TopoMetaLRUCache TOPO_META_CACHE = new TopoMetaLRUCache();
+    static final TopoMetaLruCache TOPO_META_CACHE = new TopoMetaLruCache();
     private static final Logger LOG = LoggerFactory.getLogger(BasicContainer.class);
-    private static final FilenameFilter jarFilter = (dir, name) -> name.endsWith(".jar");
-    private static final Joiner CPJ =
-        Joiner.on(File.pathSeparator).skipNulls();
-    protected final LocalState _localState;
-    protected final String _profileCmd;
-    protected final String _stormHome = System.getProperty(ConfigUtils.STORM_HOME);
+    private static final Joiner CPJ = Joiner.on(File.pathSeparator).skipNulls();
+    protected final LocalState localState;
+    protected final String profileCmd;
+    protected final String stormHome = System.getProperty(ConfigUtils.STORM_HOME);
     protected final double hardMemoryLimitMultiplier;
     protected final long hardMemoryLimitOver;
-    protected final long lowMemoryThresholdMB;
+    protected final long lowMemoryThresholdMb;
     protected final long mediumMemoryThresholdMb;
     protected final long mediumMemoryGracePeriodMs;
-    protected volatile boolean _exitedEarly = false;
-    protected volatile long memoryLimitMB;
+    protected volatile boolean exitedEarly = false;
+    protected volatile long memoryLimitMb;
     protected volatile long memoryLimitExceededStart;
 
     /**
@@ -132,7 +129,7 @@
         super(type, conf, supervisorId, supervisorPort, port, assignment,
             resourceIsolationManager, workerId, topoConf, ops, metricsRegistry, containerMemoryTracker);
         assert (localState != null);
-        _localState = localState;
+        this.localState = localState;
 
         if (type.isRecovery() && !type.isOnlyKillable()) {
             synchronized (localState) {
@@ -147,23 +144,23 @@
                     throw new ContainerRecoveryException("Could not find worker id for " + port + " " + assignment);
                 }
                 LOG.info("Recovered Worker {}", wid);
-                _workerId = wid;
+                this.workerId = wid;
             }
-        } else if (_workerId == null) {
+        } else if (this.workerId == null) {
             createNewWorkerId();
         }
 
         if (profileCmd == null) {
-            profileCmd = _stormHome + File.separator + "bin" + File.separator
+            profileCmd = stormHome + File.separator + "bin" + File.separator
                          + conf.get(DaemonConfig.WORKER_PROFILER_COMMAND);
         }
-        _profileCmd = profileCmd;
+        this.profileCmd = profileCmd;
 
         hardMemoryLimitMultiplier =
             ObjectReader.getDouble(conf.get(DaemonConfig.STORM_SUPERVISOR_HARD_MEMORY_LIMIT_MULTIPLIER), 2.0);
         hardMemoryLimitOver =
             ObjectReader.getInt(conf.get(DaemonConfig.STORM_SUPERVISOR_HARD_LIMIT_MEMORY_OVERAGE_MB), 0);
-        lowMemoryThresholdMB = ObjectReader.getInt(conf.get(DaemonConfig.STORM_SUPERVISOR_LOW_MEMORY_THRESHOLD_MB), 1024);
+        lowMemoryThresholdMb = ObjectReader.getInt(conf.get(DaemonConfig.STORM_SUPERVISOR_LOW_MEMORY_THRESHOLD_MB), 1024);
         mediumMemoryThresholdMb =
             ObjectReader.getInt(conf.get(DaemonConfig.STORM_SUPERVISOR_MEDIUM_MEMORY_THRESHOLD_MB), 1536);
         mediumMemoryGracePeriodMs =
@@ -171,14 +168,14 @@
 
         if (assignment != null) {
             WorkerResources resources = assignment.get_resources();
-            memoryLimitMB = calculateMemoryLimit(resources, getMemOnHeap(resources));
+            memoryLimitMb = calculateMemoryLimit(resources, getMemOnHeap(resources));
         }
     }
 
-    private static void removeWorkersOn(Map<String, Integer> workerToPort, int _port) {
+    private static void removeWorkersOn(Map<String, Integer> workerToPort, int port) {
         for (Iterator<Entry<String, Integer>> i = workerToPort.entrySet().iterator(); i.hasNext(); ) {
             Entry<String, Integer> found = i.next();
-            if (_port == found.getValue().intValue()) {
+            if (port == found.getValue().intValue()) {
                 LOG.warn("Deleting worker {} from state", found.getKey());
                 i.remove();
             }
@@ -200,31 +197,31 @@
      * up and running. We will lose track of the process.
      */
     protected void createNewWorkerId() {
-        _type.assertFull();
-        assert (_workerId == null);
-        synchronized (_localState) {
-            _workerId = Utils.uuid();
-            Map<String, Integer> workerToPort = _localState.getApprovedWorkers();
+        type.assertFull();
+        assert (workerId == null);
+        synchronized (localState) {
+            workerId = Utils.uuid();
+            Map<String, Integer> workerToPort = localState.getApprovedWorkers();
             if (workerToPort == null) {
                 workerToPort = new HashMap<>(1);
             }
-            removeWorkersOn(workerToPort, _port);
-            workerToPort.put(_workerId, _port);
-            _localState.setApprovedWorkers(workerToPort);
-            LOG.info("Created Worker ID {}", _workerId);
+            removeWorkersOn(workerToPort, port);
+            workerToPort.put(workerId, port);
+            localState.setApprovedWorkers(workerToPort);
+            LOG.info("Created Worker ID {}", workerId);
         }
     }
 
     @Override
     public void cleanUpForRestart() throws IOException {
-        String origWorkerId = _workerId;
+        String origWorkerId = workerId;
         super.cleanUpForRestart();
-        synchronized (_localState) {
-            Map<String, Integer> workersToPort = _localState.getApprovedWorkers();
+        synchronized (localState) {
+            Map<String, Integer> workersToPort = localState.getApprovedWorkers();
             if (workersToPort != null) {
                 workersToPort.remove(origWorkerId);
-                removeWorkersOn(workersToPort, _port);
-                _localState.setApprovedWorkers(workersToPort);
+                removeWorkersOn(workersToPort, port);
+                localState.setApprovedWorkers(workersToPort);
                 LOG.info("Removed Worker ID {}", origWorkerId);
             } else {
                 LOG.warn("No approved workers exists");
@@ -234,9 +231,9 @@
 
     @Override
     public void relaunch() throws IOException {
-        _type.assertFull();
+        type.assertFull();
         //We are launching it now...
-        _type = ContainerType.LAUNCH;
+        type = ContainerType.LAUNCH;
         createNewWorkerId();
         setup();
         launch();
@@ -244,7 +241,7 @@
 
     @Override
     public boolean didMainProcessExit() {
-        return _exitedEarly;
+        return exitedEarly;
     }
 
     /**
@@ -261,7 +258,7 @@
      */
     protected boolean runProfilingCommand(List<String> command, Map<String, String> env, String logPrefix,
                                           File targetDir) throws IOException, InterruptedException {
-        _type.assertFull();
+        type.assertFull();
         Process p = ClientSupervisorUtils.launchProcess(command, env, logPrefix, null, targetDir);
         int ret = p.waitFor();
         return ret == 0;
@@ -269,21 +266,21 @@
 
     @Override
     public boolean runProfiling(ProfileRequest request, boolean stop) throws IOException, InterruptedException {
-        _type.assertFull();
-        String targetDir = ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port);
+        type.assertFull();
+        String targetDir = ConfigUtils.workerArtifactsRoot(conf, topologyId, port);
 
         @SuppressWarnings("unchecked")
-        Map<String, String> env = (Map<String, String>) _topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
+        Map<String, String> env = (Map<String, String>) topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
         if (env == null) {
             env = new HashMap<>();
         }
 
-        String str = ConfigUtils.workerArtifactsPidPath(_conf, _topologyId, _port);
+        String str = ConfigUtils.workerArtifactsPidPath(conf, topologyId, port);
 
-        String workerPid = _ops.slurpString(new File(str)).trim();
+        String workerPid = ops.slurpString(new File(str)).trim();
 
         ProfileAction profileAction = request.get_action();
-        String logPrefix = "ProfilerAction process " + _topologyId + ":" + _port + " PROFILER_ACTION: " + profileAction
+        String logPrefix = "ProfilerAction process " + topologyId + ":" + port + " PROFILER_ACTION: " + profileAction
                            + " ";
 
         List<String> command = mkProfileCommand(profileAction, stop, workerPid, targetDir);
@@ -326,27 +323,27 @@
     }
 
     private List<String> jmapDumpCmd(String pid, String targetDir) {
-        return Lists.newArrayList(_profileCmd, pid, "jmap", targetDir);
+        return Lists.newArrayList(profileCmd, pid, "jmap", targetDir);
     }
 
     private List<String> jstackDumpCmd(String pid, String targetDir) {
-        return Lists.newArrayList(_profileCmd, pid, "jstack", targetDir);
+        return Lists.newArrayList(profileCmd, pid, "jstack", targetDir);
     }
 
     private List<String> jprofileStart(String pid) {
-        return Lists.newArrayList(_profileCmd, pid, "start");
+        return Lists.newArrayList(profileCmd, pid, "start");
     }
 
     private List<String> jprofileStop(String pid, String targetDir) {
-        return Lists.newArrayList(_profileCmd, pid, "stop", targetDir);
+        return Lists.newArrayList(profileCmd, pid, "stop", targetDir);
     }
 
     private List<String> jprofileDump(String pid, String targetDir) {
-        return Lists.newArrayList(_profileCmd, pid, "dump", targetDir);
+        return Lists.newArrayList(profileCmd, pid, "dump", targetDir);
     }
 
     private List<String> jprofileJvmRestart(String pid) {
-        return Lists.newArrayList(_profileCmd, pid, "kill");
+        return Lists.newArrayList(profileCmd, pid, "kill");
     }
 
     /**
@@ -378,12 +375,11 @@
     }
 
     protected List<String> frameworkClasspath(SimpleVersion topoVersion) {
-        File stormWorkerLibDir = new File(_stormHome, "lib-worker");
-        String topoConfDir =
-            System.getenv("STORM_CONF_DIR") != null ?
-                System.getenv("STORM_CONF_DIR") :
-                new File(_stormHome, "conf").getAbsolutePath();
-        File stormExtlibDir = new File(_stormHome, "extlib");
+        File stormWorkerLibDir = new File(stormHome, "lib-worker");
+        String topoConfDir = System.getenv("STORM_CONF_DIR") != null
+                ? System.getenv("STORM_CONF_DIR")
+                : new File(stormHome, "conf").getAbsolutePath();
+        File stormExtlibDir = new File(stormHome, "extlib");
         String extcp = System.getenv("STORM_EXT_CLASSPATH");
         List<String> pathElements = new LinkedList<>();
         pathElements.add(getWildcardDir(stormWorkerLibDir));
@@ -391,7 +387,7 @@
         pathElements.add(extcp);
         pathElements.add(topoConfDir);
 
-        NavigableMap<SimpleVersion, List<String>> classpaths = Utils.getConfiguredClasspathVersions(_conf, pathElements);
+        NavigableMap<SimpleVersion, List<String>> classpaths = Utils.getConfiguredClasspathVersions(conf, pathElements);
 
         return Utils.getCompatibleVersion(classpaths, topoVersion, "classpath", pathElements);
     }
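// frameworkClasspath() above delegates the final choice to
// Utils.getCompatibleVersion(...), which picks the classpath configured for
// the topology's Storm version. The TreeMap/floorEntry mechanism below is an
// assumption used for illustration; the real resolution lives in
// org.apache.storm.utils.Utils and keys on SimpleVersion, not double.
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

class CompatibleVersionSketch {
    static List<String> pick(NavigableMap<Double, List<String>> byVersion,
                             double topoVersion, List<String> fallback) {
        var hit = byVersion.floorEntry(topoVersion); // greatest key <= version
        return hit != null ? hit.getValue() : fallback;
    }

    public static void main(String[] args) {
        NavigableMap<Double, List<String>> classpaths = new TreeMap<>();
        classpaths.put(1.0, List.of("lib-v1/*"));
        classpaths.put(2.0, List.of("lib-v2/*"));
        System.out.println(pick(classpaths, 2.1, List.of("lib/*"))); // [lib-v2/*]
        System.out.println(pick(classpaths, 0.9, List.of("lib/*"))); // [lib/*]
    }
}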
@@ -405,7 +401,7 @@
             //Have not moved to a java worker yet
             defaultWorkerGuess = "org.apache.storm.daemon.worker";
         }
-        NavigableMap<SimpleVersion, String> mains = Utils.getConfiguredWorkerMainVersions(_conf);
+        NavigableMap<SimpleVersion, String> mains = Utils.getConfiguredWorkerMainVersions(conf);
         return Utils.getCompatibleVersion(mains, topoVersion, "worker main class", defaultWorkerGuess);
     }
 
@@ -415,7 +411,7 @@
             //Prior to the org.apache change
             defaultGuess = "backtype.storm.LogWriter";
         }
-        NavigableMap<SimpleVersion, String> mains = Utils.getConfiguredWorkerLogWriterVersions(_conf);
+        NavigableMap<SimpleVersion, String> mains = Utils.getConfiguredWorkerLogWriterVersions(conf);
         return Utils.getCompatibleVersion(mains, topoVersion, "worker log writer class", defaultGuess);
     }
 
@@ -439,26 +435,26 @@
      */
     protected String getWorkerClassPath(String stormJar, List<String> dependencyLocations, SimpleVersion topoVersion) {
         List<String> workercp = new ArrayList<>();
-        workercp.addAll(asStringList(_topoConf.get(Config.TOPOLOGY_CLASSPATH_BEGINNING)));
+        workercp.addAll(asStringList(topoConf.get(Config.TOPOLOGY_CLASSPATH_BEGINNING)));
         workercp.addAll(frameworkClasspath(topoVersion));
         workercp.add(stormJar);
         workercp.addAll(dependencyLocations);
-        workercp.addAll(asStringList(_topoConf.get(Config.TOPOLOGY_CLASSPATH)));
+        workercp.addAll(asStringList(topoConf.get(Config.TOPOLOGY_CLASSPATH)));
         return CPJ.join(workercp);
     }
 
     private String substituteChildOptsInternal(String string, int memOnheap) {
         if (StringUtils.isNotBlank(string)) {
-            String p = String.valueOf(_port);
+            String p = String.valueOf(port);
             string = string.replace("%ID%", p);
-            string = string.replace("%WORKER-ID%", _workerId);
-            string = string.replace("%TOPOLOGY-ID%", _topologyId);
+            string = string.replace("%WORKER-ID%", workerId);
+            string = string.replace("%TOPOLOGY-ID%", topologyId);
             string = string.replace("%WORKER-PORT%", p);
             if (memOnheap > 0) {
                 string = string.replace("%HEAP-MEM%", String.valueOf(memOnheap));
             }
-            if (memoryLimitMB > 0) {
-                string = string.replace("%LIMIT-MEM%", String.valueOf(memoryLimitMB));
+            if (memoryLimitMb > 0) {
+                string = string.replace("%LIMIT-MEM%", String.valueOf(memoryLimitMb));
             }
         }
         return string;
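// Condensed sketch of the %PLACEHOLDER% substitution that
// substituteChildOptsInternal() above applies to worker JVM options; the
// option string and values here are invented for the example, and the real
// method additionally fills %HEAP-MEM% and %LIMIT-MEM% only when those
// limits are positive.
class ChildOptsSketch {
    static String substitute(String opts, int port, String workerId, String topologyId) {
        String p = String.valueOf(port);
        return opts.replace("%ID%", p)
                   .replace("%WORKER-ID%", workerId)
                   .replace("%TOPOLOGY-ID%", topologyId)
                   .replace("%WORKER-PORT%", p);
    }

    public static void main(String[] args) {
        System.out.println(substitute(
            "-Dworker=%WORKER-ID% -Dtopo=%TOPOLOGY-ID% -Dport=%WORKER-PORT%",
            6700, "w-1234", "topo-1"));
        // -Dworker=w-1234 -Dtopo=topo-1 -Dport=6700
    }
}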
@@ -507,21 +503,21 @@
      */
     protected void launchWorkerProcess(List<String> command, Map<String, String> env, String logPrefix,
                                        ExitCodeCallback processExitCallback, File targetDir) throws IOException {
-        if (_resourceIsolationManager != null) {
-            command = _resourceIsolationManager.getLaunchCommand(_workerId, command);
+        if (resourceIsolationManager != null) {
+            command = resourceIsolationManager.getLaunchCommand(workerId, command);
         }
         ClientSupervisorUtils.launchProcess(command, env, logPrefix, processExitCallback, targetDir);
     }
 
     private String getWorkerLoggingConfigFile() {
-        String log4jConfigurationDir = (String) (_conf.get(DaemonConfig.STORM_LOG4J2_CONF_DIR));
+        String log4jConfigurationDir = (String) (conf.get(DaemonConfig.STORM_LOG4J2_CONF_DIR));
 
         if (StringUtils.isNotBlank(log4jConfigurationDir)) {
             if (!ServerUtils.isAbsolutePath(log4jConfigurationDir)) {
-                log4jConfigurationDir = _stormHome + File.separator + log4jConfigurationDir;
+                log4jConfigurationDir = stormHome + File.separator + log4jConfigurationDir;
             }
         } else {
-            log4jConfigurationDir = _stormHome + File.separator + "log4j2";
+            log4jConfigurationDir = stormHome + File.separator + "log4j2";
         }
 
         if (ServerUtils.IS_ON_WINDOWS && !log4jConfigurationDir.startsWith("file:")) {
@@ -540,7 +536,7 @@
      */
     private List<String> getClassPathParams(final String stormRoot, final SimpleVersion topoVersion) throws IOException {
         final String stormJar = ConfigUtils.supervisorStormJarPath(stormRoot);
-        final List<String> dependencyLocations = getDependencyLocationsFor(_conf, _topologyId, _ops, stormRoot);
+        final List<String> dependencyLocations = getDependencyLocationsFor(conf, topologyId, ops, stormRoot);
         final String workerClassPath = getWorkerClassPath(stormJar, dependencyLocations, topoVersion);
 
         List<String> classPathParams = new ArrayList<>();
@@ -556,42 +552,43 @@
      * @return a list of command line options
      */
     private List<String> getCommonParams() {
-        final String workersArtifacts = ConfigUtils.workerArtifactsRoot(_conf);
+        final String workersArtifacts = ConfigUtils.workerArtifactsRoot(conf);
         String stormLogDir = ConfigUtils.getLogDir();
         
         List<String> commonParams = new ArrayList<>();
-        commonParams.add("-Dlogging.sensitivity=" + OR((String) _topoConf.get(Config.TOPOLOGY_LOGGING_SENSITIVITY), "S3"));
+        commonParams.add("-Dlogging.sensitivity=" + OR((String) topoConf.get(Config.TOPOLOGY_LOGGING_SENSITIVITY), "S3"));
         commonParams.add("-Dlogfile.name=worker.log");
-        commonParams.add("-Dstorm.home=" + OR(_stormHome, ""));
+        commonParams.add("-Dstorm.home=" + OR(stormHome, ""));
         commonParams.add("-Dworkers.artifacts=" + workersArtifacts);
-        commonParams.add("-Dstorm.id=" + _topologyId);
-        commonParams.add("-Dworker.id=" + _workerId);
-        commonParams.add("-Dworker.port=" + _port);
+        commonParams.add("-Dstorm.id=" + topologyId);
+        commonParams.add("-Dworker.id=" + workerId);
+        commonParams.add("-Dworker.port=" + port);
         commonParams.add("-Dstorm.log.dir=" + stormLogDir);
         commonParams.add("-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector");
-        commonParams.add("-Dstorm.local.dir=" + _conf.get(Config.STORM_LOCAL_DIR));
-        if (memoryLimitMB > 0) {
-            commonParams.add("-Dworker.memory_limit_mb=" + memoryLimitMB);
+        commonParams.add("-Dstorm.local.dir=" + conf.get(Config.STORM_LOCAL_DIR));
+        if (memoryLimitMb > 0) {
+            commonParams.add("-Dworker.memory_limit_mb=" + memoryLimitMb);
         }
         return commonParams;
     }
 
     private int getMemOnHeap(WorkerResources resources) {
         int memOnheap = 0;
-        if (resources != null && resources.is_set_mem_on_heap() &&
-            resources.get_mem_on_heap() > 0) {
+        if (resources != null
+                && resources.is_set_mem_on_heap()
+                && resources.get_mem_on_heap() > 0) {
             memOnheap = (int) Math.ceil(resources.get_mem_on_heap());
         } else {
             // set the default heap memory size for supervisor-test
-            memOnheap = ObjectReader.getInt(_topoConf.get(Config.WORKER_HEAP_MEMORY_MB), 768);
+            memOnheap = ObjectReader.getInt(topoConf.get(Config.WORKER_HEAP_MEMORY_MB), 768);
         }
         return memOnheap;
     }
 
     private List<String> getWorkerProfilerChildOpts(int memOnheap) {
         List<String> workerProfilerChildopts = new ArrayList<>();
-        if (ObjectReader.getBoolean(_conf.get(DaemonConfig.WORKER_PROFILER_ENABLED), false)) {
-            workerProfilerChildopts = substituteChildopts(_conf.get(DaemonConfig.WORKER_PROFILER_CHILDOPTS), memOnheap);
+        if (ObjectReader.getBoolean(conf.get(DaemonConfig.WORKER_PROFILER_ENABLED), false)) {
+            workerProfilerChildopts = substituteChildopts(conf.get(DaemonConfig.WORKER_PROFILER_CHILDOPTS), memOnheap);
         }
         return workerProfilerChildopts;
     }
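// The memory settings above all follow the same "config value or default"
// idiom via ObjectReader.getInt(conf.get(KEY), DEFAULT). A minimal stand-in
// for that idiom (the real ObjectReader also coerces String values, which
// this sketch omits):
import java.util.Map;

class ConfDefaultSketch {
    static int getInt(Object value, int dflt) {
        return value == null ? dflt : ((Number) value).intValue();
    }

    public static void main(String[] args) {
        Map<String, Object> conf = Map.of("worker.heap.memory.mb", 1024);
        System.out.println(getInt(conf.get("worker.heap.memory.mb"), 768)); // 1024
        System.out.println(getInt(conf.get("absent.key"), 768));            // 768
    }
}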
@@ -622,10 +619,10 @@
         final String javaCmd = javaCmd("java");
         final String stormOptions = ConfigUtils.concatIfNotNull(System.getProperty("storm.options"));
         final String topoConfFile = ConfigUtils.concatIfNotNull(System.getProperty("storm.conf.file"));
-        final String workerTmpDir = ConfigUtils.workerTmpRoot(_conf, _workerId);
-        String topoVersionString = getStormVersionFor(_conf, _topologyId, _ops, stormRoot);
+        final String workerTmpDir = ConfigUtils.workerTmpRoot(conf, workerId);
+        String topoVersionString = getStormVersionFor(conf, topologyId, ops, stormRoot);
         if (topoVersionString == null) {
-            topoVersionString = (String) _conf.getOrDefault(Config.SUPERVISOR_WORKER_DEFAULT_VERSION, VersionInfo.getVersion());
+            topoVersionString = (String) conf.getOrDefault(Config.SUPERVISOR_WORKER_DEFAULT_VERSION, VersionInfo.getVersion());
         }
         final SimpleVersion topoVersion = new SimpleVersion(topoVersionString);
 
@@ -634,8 +631,8 @@
 
         String log4jConfigurationFile = getWorkerLoggingConfigFile();
         String workerLog4jConfig = log4jConfigurationFile;
-        if (_topoConf.get(Config.TOPOLOGY_LOGGING_CONFIG_FILE) != null) {
-            workerLog4jConfig = workerLog4jConfig + "," + _topoConf.get(Config.TOPOLOGY_LOGGING_CONFIG_FILE);
+        if (topoConf.get(Config.TOPOLOGY_LOGGING_CONFIG_FILE) != null) {
+            workerLog4jConfig = workerLog4jConfig + "," + topoConf.get(Config.TOPOLOGY_LOGGING_CONFIG_FILE);
         }
 
         List<String> commandList = new ArrayList<>();
@@ -644,7 +641,7 @@
             //Log Writer Command...
             commandList.add(javaCmd);
             commandList.addAll(classPathParams);
-            commandList.addAll(substituteChildopts(_topoConf.get(Config.TOPOLOGY_WORKER_LOGWRITER_CHILDOPTS)));
+            commandList.addAll(substituteChildopts(topoConf.get(Config.TOPOLOGY_WORKER_LOGWRITER_CHILDOPTS)));
             commandList.addAll(commonParams);
             commandList.add("-Dlog4j.configurationFile=" + log4jConfigurationFile);
             commandList.add(logWriter); //The LogWriter in turn launches the actual worker.
@@ -655,11 +652,11 @@
         commandList.add("-server");
         commandList.addAll(commonParams);
         commandList.add("-Dlog4j.configurationFile=" + workerLog4jConfig);
-        commandList.addAll(substituteChildopts(_conf.get(Config.WORKER_CHILDOPTS), memOnheap));
-        commandList.addAll(substituteChildopts(_topoConf.get(Config.TOPOLOGY_WORKER_CHILDOPTS), memOnheap));
+        commandList.addAll(substituteChildopts(conf.get(Config.WORKER_CHILDOPTS), memOnheap));
+        commandList.addAll(substituteChildopts(topoConf.get(Config.TOPOLOGY_WORKER_CHILDOPTS), memOnheap));
         commandList.addAll(substituteChildopts(Utils.OR(
-            _topoConf.get(Config.TOPOLOGY_WORKER_GC_CHILDOPTS),
-            _conf.get(Config.WORKER_GC_CHILDOPTS)), memOnheap));
+            topoConf.get(Config.TOPOLOGY_WORKER_GC_CHILDOPTS),
+            conf.get(Config.WORKER_GC_CHILDOPTS)), memOnheap));
         commandList.addAll(getWorkerProfilerChildOpts(memOnheap));
         commandList.add("-Djava.library.path=" + jlp);
         commandList.add("-Dstorm.conf.file=" + topoConfFile);
@@ -667,18 +664,18 @@
         commandList.add("-Djava.io.tmpdir=" + workerTmpDir);
         commandList.addAll(classPathParams);
         commandList.add(getWorkerMain(topoVersion));
-        commandList.add(_topologyId);
-        commandList.add(_supervisorId);
+        commandList.add(topologyId);
+        commandList.add(supervisorId);
 
         // supervisor port should be only presented to worker which supports RPC heartbeat
         // unknown version should be treated as "current version", which supports RPC heartbeat
-        if ((topoVersion.getMajor() == -1 && topoVersion.getMinor() == -1) ||
-            topoVersion.compareTo(MIN_VERSION_SUPPORT_RPC_HEARTBEAT) >= 0) {
-            commandList.add(String.valueOf(_supervisorPort));
+        if ((topoVersion.getMajor() == -1 && topoVersion.getMinor() == -1)
+                || topoVersion.compareTo(MIN_VERSION_SUPPORT_RPC_HEARTBEAT) >= 0) {
+            commandList.add(String.valueOf(supervisorPort));
         }
 
-        commandList.add(String.valueOf(_port));
-        commandList.add(_workerId);
+        commandList.add(String.valueOf(port));
+        commandList.add(workerId);
 
         return commandList;
     }
@@ -688,7 +685,7 @@
         if (super.isMemoryLimitViolated(withUpdatedLimits)) {
             return true;
         }
-        if (_resourceIsolationManager != null) {
+        if (resourceIsolationManager != null) {
             // In the short term the goal is to not shoot anyone unless we really need to.
             // The on heap should limit the memory usage in most cases to a reasonable amount
             // If someone is using way more than they requested this is a bug and we should
@@ -706,12 +703,12 @@
                 usageMb = getTotalTopologyMemoryUsed();
                 memoryLimitMb = getTotalTopologyMemoryReserved(withUpdatedLimits);
                 hardMemoryLimitOver = this.hardMemoryLimitOver * getTotalWorkersForThisTopology();
-                typeOfCheck = "TOPOLOGY " + _topologyId;
+                typeOfCheck = "TOPOLOGY " + topologyId;
             } else {
                 usageMb = getMemoryUsageMb();
-                memoryLimitMb = this.memoryLimitMB;
+                memoryLimitMb = this.memoryLimitMb;
                 hardMemoryLimitOver = this.hardMemoryLimitOver;
-                typeOfCheck = "WORKER " + _workerId;
+                typeOfCheck = "WORKER " + workerId;
             }
             LOG.debug(
                 "Enforcing memory usage for {} with usage of {} out of {} total and a hard limit of {}",
@@ -735,13 +732,13 @@
                // to be used. If we cannot calculate it, assume that it is bad
                 long systemFreeMemoryMb = 0;
                 try {
-                    systemFreeMemoryMb = _resourceIsolationManager.getSystemFreeMemoryMb();
+                    systemFreeMemoryMb = resourceIsolationManager.getSystemFreeMemoryMb();
                 } catch (IOException e) {
                     LOG.warn("Error trying to calculate free memory on the system {}", e);
                 }
                 LOG.debug("SYSTEM MEMORY FREE {} MB", systemFreeMemoryMb);
                 //If the system is low on memory we cannot be kind and need to shoot something
-                if (systemFreeMemoryMb <= lowMemoryThresholdMB) {
+                if (systemFreeMemoryMb <= lowMemoryThresholdMb) {
                     LOG.warn(
                         "{} is using {} MB > memory limit {} MB and system is low on memory {} free",
                         typeOfCheck,
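// Distilled form of the enforcement policy in isMemoryLimitViolated() above:
// a worker over its limit is killed immediately only when the node itself is
// low on free memory; otherwise it gets a grace period. Parameter names and
// the exact decision shape are a simplification of the surrounding code.
class MemoryEnforcementSketch {
    static boolean shouldKill(long usageMb, long limitMb, long systemFreeMb,
                              long lowThresholdMb, long msOverLimit, long graceMs) {
        if (usageMb <= limitMb) {
            return false;                    // within budget, nothing to do
        }
        if (systemFreeMb <= lowThresholdMb) {
            return true;                     // system is starved: shoot now
        }
        return msOverLimit >= graceMs;       // lenient until the grace expires
    }

    public static void main(String[] args) {
        System.out.println(shouldKill(2048, 1024, 512, 1024, 0, 30_000));      // true
        System.out.println(shouldKill(2048, 1024, 8192, 1024, 5_000, 30_000)); // false
    }
}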
@@ -784,8 +781,8 @@
     public long getMemoryUsageMb() {
         try {
             long ret = 0;
-            if (_resourceIsolationManager != null) {
-                long usageBytes = _resourceIsolationManager.getMemoryUsage(_workerId);
+            if (resourceIsolationManager != null) {
+                long usageBytes = resourceIsolationManager.getMemoryUsage(workerId);
                 if (usageBytes >= 0) {
                     ret = usageBytes / 1024 / 1024;
                 }
@@ -799,18 +796,18 @@
 
     @Override
     public long getMemoryReservationMb() {
-        return memoryLimitMB;
+        return memoryLimitMb;
     }
 
     private long calculateMemoryLimit(final WorkerResources resources, final int memOnHeap) {
         long ret = memOnHeap;
-        if (_resourceIsolationManager != null) {
+        if (resourceIsolationManager != null) {
             final int memoffheap = (int) Math.ceil(resources.get_mem_off_heap());
             final int extraMem =
                 (int)
                     (Math.ceil(
                         ObjectReader.getDouble(
-                            _conf.get(DaemonConfig.STORM_SUPERVISOR_MEMORY_LIMIT_TOLERANCE_MARGIN_MB),
+                            conf.get(DaemonConfig.STORM_SUPERVISOR_MEMORY_LIMIT_TOLERANCE_MARGIN_MB),
                             0.0)));
             ret += memoffheap + extraMem;
         }
@@ -819,146 +816,147 @@
 
     @Override
     public void launch() throws IOException {
-        _type.assertFull();
-        LOG.info("Launching worker with assignment {} for this supervisor {} on port {} with id {}", _assignment,
-                 _supervisorId, _port, _workerId);
-        String logPrefix = "Worker Process " + _workerId;
-        ProcessExitCallback processExitCallback = new ProcessExitCallback(logPrefix);
-        _exitedEarly = false;
+        type.assertFull();
+        LOG.info("Launching worker with assignment {} for this supervisor {} on port {} with id {}", assignment,
+                supervisorId, port, workerId);
+        exitedEarly = false;
 
-        final WorkerResources resources = _assignment.get_resources();
+        final WorkerResources resources = assignment.get_resources();
         final int memOnHeap = getMemOnHeap(resources);
-        memoryLimitMB = calculateMemoryLimit(resources, memOnHeap);
-        final String stormRoot = ConfigUtils.supervisorStormDistRoot(_conf, _topologyId);
-        String jlp = javaLibraryPath(stormRoot, _conf);
+        memoryLimitMb = calculateMemoryLimit(resources, memOnHeap);
+        final String stormRoot = ConfigUtils.supervisorStormDistRoot(conf, topologyId);
+        String jlp = javaLibraryPath(stormRoot, conf);
 
         Map<String, String> topEnvironment = new HashMap<String, String>();
         @SuppressWarnings("unchecked")
-        Map<String, String> environment = (Map<String, String>) _topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
+        Map<String, String> environment = (Map<String, String>) topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
         if (environment != null) {
             topEnvironment.putAll(environment);
         }
 
-        String ld_library_path = topEnvironment.get("LD_LIBRARY_PATH");
-        if (ld_library_path != null) {
-            jlp = jlp + System.getProperty("path.separator") + ld_library_path;
+        String ldLibraryPath = topEnvironment.get("LD_LIBRARY_PATH");
+        if (ldLibraryPath != null) {
+            jlp = jlp + System.getProperty("path.separator") + ldLibraryPath;
         }
 
         topEnvironment.put("LD_LIBRARY_PATH", jlp);
 
-        if (_resourceIsolationManager != null) {
+        if (resourceIsolationManager != null) {
             final int cpu = (int) Math.ceil(resources.get_cpu());
             //Save the memory limit so we can enforce it less strictly
-            _resourceIsolationManager.reserveResourcesForWorker(_workerId, (int) memoryLimitMB, cpu);
+            resourceIsolationManager.reserveResourcesForWorker(workerId, (int) memoryLimitMb, cpu);
         }
 
         List<String> commandList = mkLaunchCommand(memOnHeap, stormRoot, jlp);
 
         LOG.info("Launching worker with command: {}. ", ServerUtils.shellCmd(commandList));
 
-        String workerDir = ConfigUtils.workerRoot(_conf, _workerId);
+        String workerDir = ConfigUtils.workerRoot(conf, workerId);
 
+        String logPrefix = "Worker Process " + workerId;
+        ProcessExitCallback processExitCallback = new ProcessExitCallback(logPrefix);
         launchWorkerProcess(commandList, topEnvironment, logPrefix, processExitCallback, new File(workerDir));
     }
 
     private static class TopologyMetaData {
-        private final Map<String, Object> _conf;
-        private final String _topologyId;
-        private final AdvancedFSOps _ops;
-        private final String _stormRoot;
-        private boolean _dataCached = false;
-        private List<String> _depLocs = null;
-        private String _stormVersion = null;
+        private final Map<String, Object> conf;
+        private final String topologyId;
+        private final AdvancedFSOps ops;
+        private final String stormRoot;
+        private boolean dataCached = false;
+        private List<String> depLocs = null;
+        private String stormVersion = null;
 
-        public TopologyMetaData(final Map<String, Object> conf, final String topologyId, final AdvancedFSOps ops, final String stormRoot) {
-            _conf = conf;
-            _topologyId = topologyId;
-            _ops = ops;
-            _stormRoot = stormRoot;
+        TopologyMetaData(final Map<String, Object> conf, final String topologyId, final AdvancedFSOps ops, final String stormRoot) {
+            this.conf = conf;
+            this.topologyId = topologyId;
+            this.ops = ops;
+            this.stormRoot = stormRoot;
         }
 
+        @Override
         public String toString() {
             List<String> data;
             String stormVersion;
             synchronized (this) {
-                data = _depLocs;
-                stormVersion = _stormVersion;
+                data = depLocs;
+                stormVersion = this.stormVersion;
             }
-            return "META for " + _topologyId + " DEP_LOCS => " + data + " STORM_VERSION => " + stormVersion;
+            return "META for " + topologyId + " DEP_LOCS => " + data + " STORM_VERSION => " + stormVersion;
         }
 
         private synchronized void readData() throws IOException {
-            final StormTopology stormTopology = ConfigUtils.readSupervisorTopology(_conf, _topologyId, _ops);
+            final StormTopology stormTopology = ConfigUtils.readSupervisorTopology(conf, topologyId, ops);
             final List<String> dependencyLocations = new ArrayList<>();
             if (stormTopology.get_dependency_jars() != null) {
                 for (String dependency : stormTopology.get_dependency_jars()) {
-                    dependencyLocations.add(new File(_stormRoot, dependency).getAbsolutePath());
+                    dependencyLocations.add(new File(stormRoot, dependency).getAbsolutePath());
                 }
             }
 
             if (stormTopology.get_dependency_artifacts() != null) {
                 for (String dependency : stormTopology.get_dependency_artifacts()) {
-                    dependencyLocations.add(new File(_stormRoot, dependency).getAbsolutePath());
+                    dependencyLocations.add(new File(stormRoot, dependency).getAbsolutePath());
                 }
             }
-            _depLocs = dependencyLocations;
-            _stormVersion = stormTopology.get_storm_version();
-            _dataCached = true;
+            depLocs = dependencyLocations;
+            stormVersion = stormTopology.get_storm_version();
+            dataCached = true;
         }
 
         public synchronized List<String> getDepLocs() throws IOException {
-            if (!_dataCached) {
+            if (!dataCached) {
                 readData();
             }
-            return _depLocs;
+            return depLocs;
         }
 
         public synchronized String getStormVersion() throws IOException {
-            if (!_dataCached) {
+            if (!dataCached) {
                 readData();
             }
-            return _stormVersion;
+            return stormVersion;
         }
     }
 
-    static class TopoMetaLRUCache {
-        public final int _maxSize = 100; //We could make this configurable in the future...
+    static class TopoMetaLruCache {
+        public final int maxSize = 100; //We could make this configurable in the future...
 
         @SuppressWarnings("serial")
-        private LinkedHashMap<String, TopologyMetaData> _cache = new LinkedHashMap<String, TopologyMetaData>() {
+        private LinkedHashMap<String, TopologyMetaData> cache = new LinkedHashMap<String, TopologyMetaData>() {
             @Override
             protected boolean removeEldestEntry(Map.Entry<String, TopologyMetaData> eldest) {
-                return (size() > _maxSize);
+                return (size() > maxSize);
             }
         };
 
         public synchronized TopologyMetaData get(final Map<String, Object> conf, final String topologyId, final AdvancedFSOps ops,
                                                  String stormRoot) {
             //Only go off of the topology id for now.
-            TopologyMetaData dl = _cache.get(topologyId);
+            TopologyMetaData dl = cache.get(topologyId);
             if (dl == null) {
-                _cache.putIfAbsent(topologyId, new TopologyMetaData(conf, topologyId, ops, stormRoot));
-                dl = _cache.get(topologyId);
+                cache.putIfAbsent(topologyId, new TopologyMetaData(conf, topologyId, ops, stormRoot));
+                dl = cache.get(topologyId);
             }
             return dl;
         }
 
         public synchronized void clear() {
-            _cache.clear();
+            cache.clear();
         }
     }
 
     private class ProcessExitCallback implements ExitCodeCallback {
-        private final String _logPrefix;
+        private final String logPrefix;
 
-        public ProcessExitCallback(String logPrefix) {
-            _logPrefix = logPrefix;
+        ProcessExitCallback(String logPrefix) {
+            this.logPrefix = logPrefix;
         }
 
         @Override
         public void call(int exitCode) {
-            LOG.info("{} exited with code: {}", _logPrefix, exitCode);
-            _exitedEarly = true;
+            LOG.info("{} exited with code: {}", logPrefix, exitCode);
+            exitedEarly = true;
         }
     }
 }
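// TopoMetaLruCache above bounds its size through LinkedHashMap's
// removeEldestEntry hook. A standalone version of that pattern follows; note
// that the cache above uses the default insertion-order constructor, so
// eviction is oldest-inserted rather than least-recently-used (passing
// accessOrder = true to LinkedHashMap would give true LRU behavior).
import java.util.LinkedHashMap;
import java.util.Map;

class BoundedCacheSketch<K, V> extends LinkedHashMap<K, V> {
    private final int maxSize;

    BoundedCacheSketch(int maxSize) {
        this.maxSize = maxSize;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return size() > maxSize; // same predicate as the cache above
    }

    public static void main(String[] args) {
        BoundedCacheSketch<String, String> cache = new BoundedCacheSketch<>(2);
        cache.put("topo-1", "meta-1");
        cache.put("topo-2", "meta-2");
        cache.put("topo-3", "meta-3"); // evicts topo-1
        System.out.println(cache.keySet()); // [topo-2, topo-3]
    }
}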
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainerLauncher.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainerLauncher.java
index 9c000cb..51dceb4 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainerLauncher.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/BasicContainerLauncher.java
@@ -21,31 +21,31 @@
 import org.apache.storm.utils.LocalState;
 
 /**
- * Launch containers with no security using standard java commands
+ * Launch containers with no security using standard java commands.
  */
 public class BasicContainerLauncher extends ContainerLauncher {
-    protected final ResourceIsolationInterface _resourceIsolationManager;
-    private final Map<String, Object> _conf;
-    private final String _supervisorId;
-    private final int _supervisorPort;
+    protected final ResourceIsolationInterface resourceIsolationManager;
+    private final Map<String, Object> conf;
+    private final String supervisorId;
+    private final int supervisorPort;
     private final StormMetricsRegistry metricsRegistry;
     private final ContainerMemoryTracker containerMemoryTracker;
 
     public BasicContainerLauncher(Map<String, Object> conf, String supervisorId, int supervisorPort,
                                   ResourceIsolationInterface resourceIsolationManager, StormMetricsRegistry metricsRegistry,
                                   ContainerMemoryTracker containerMemoryTracker) throws IOException {
-        _conf = conf;
-        _supervisorId = supervisorId;
-        _supervisorPort = supervisorPort;
-        _resourceIsolationManager = resourceIsolationManager;
+        this.conf = conf;
+        this.supervisorId = supervisorId;
+        this.supervisorPort = supervisorPort;
+        this.resourceIsolationManager = resourceIsolationManager;
         this.metricsRegistry = metricsRegistry;
         this.containerMemoryTracker = containerMemoryTracker;
     }
 
     @Override
     public Container launchContainer(int port, LocalAssignment assignment, LocalState state) throws IOException {
-        Container container = new BasicContainer(ContainerType.LAUNCH, _conf, _supervisorId, _supervisorPort, port,
-            assignment, _resourceIsolationManager, state, null, metricsRegistry,
+        Container container = new BasicContainer(ContainerType.LAUNCH, conf, supervisorId, supervisorPort, port,
+            assignment, resourceIsolationManager, state, null, metricsRegistry,
             containerMemoryTracker);
         container.setup();
         container.launch();
@@ -54,13 +54,13 @@
 
     @Override
     public Container recoverContainer(int port, LocalAssignment assignment, LocalState state) throws IOException {
-        return new BasicContainer(ContainerType.RECOVER_FULL, _conf, _supervisorId, _supervisorPort, port, assignment,
-                                  _resourceIsolationManager, state, null, metricsRegistry, containerMemoryTracker);
+        return new BasicContainer(ContainerType.RECOVER_FULL, conf, supervisorId, supervisorPort, port, assignment,
+                resourceIsolationManager, state, null, metricsRegistry, containerMemoryTracker);
     }
 
     @Override
     public Killable recoverContainer(String workerId, LocalState localState) throws IOException {
-        return new BasicContainer(ContainerType.RECOVER_PARTIAL, _conf, _supervisorId, _supervisorPort, -1, null,
-                                  _resourceIsolationManager, localState, workerId, metricsRegistry, containerMemoryTracker);
+        return new BasicContainer(ContainerType.RECOVER_PARTIAL, conf, supervisorId, supervisorPort, -1, null,
+                resourceIsolationManager, localState, workerId, metricsRegistry, containerMemoryTracker);
     }
 }
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Container.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Container.java
index 8b58483..a25faad 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Container.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Container.java
@@ -74,18 +74,18 @@
     private final Timer shutdownDuration;
     private final Timer cleanupDuration;
 
-    protected final Map<String, Object> _conf;
-    protected final Map<String, Object> _topoConf; //Not set if RECOVER_PARTIAL
-    protected final String _topologyId; //Not set if RECOVER_PARTIAL
-    protected final String _supervisorId;
-    protected final int _supervisorPort;
-    protected final int _port; //Not set if RECOVER_PARTIAL
-    protected final LocalAssignment _assignment; //Not set if RECOVER_PARTIAL
-    protected final AdvancedFSOps _ops;
-    protected final ResourceIsolationInterface _resourceIsolationManager;
-    protected final boolean _symlinksDisabled;
-    protected String _workerId;
-    protected ContainerType _type;
+    protected final Map<String, Object> conf;
+    protected final Map<String, Object> topoConf; //Not set if RECOVER_PARTIAL
+    protected final String topologyId; //Not set if RECOVER_PARTIAL
+    protected final String supervisorId;
+    protected final int supervisorPort;
+    protected final int port; //Not set if RECOVER_PARTIAL
+    protected final LocalAssignment assignment; //Not set if RECOVER_PARTIAL
+    protected final AdvancedFSOps ops;
+    protected final ResourceIsolationInterface resourceIsolationManager;
+    protected final boolean symlinksDisabled;
+    protected String workerId;
+    protected ContainerType type;
     protected ContainerMemoryTracker containerMemoryTracker;
     private long lastMetricProcessTime = 0L;
     private Timer.Context shutdownTimer = null;
@@ -98,7 +98,7 @@
      * @param supervisorId the ID of the supervisor this is a part of.
      * @param supervisorPort the thrift server port of the supervisor this is a part of.
     * @param port the port the container is on. Should be <= 0 if only a partial recovery.
     * @param assignment
-     * the assignment for this container. Should be null if only a partial recovery.
+     *     the assignment for this container. Should be null if only a partial recovery.
      * @param resourceIsolationManager used to isolate resources for a container can be null if no isolation is used.
      * @param workerId the id of the worker to use. Must not be null if doing a partial recovery.
      * @param topoConf the config of the topology (mostly for testing) if null and not a partial recovery the real conf is read.
@@ -115,44 +115,44 @@
         assert (conf != null);
         assert (supervisorId != null);
 
-        _symlinksDisabled = (boolean) conf.getOrDefault(Config.DISABLE_SYMLINKS, false);
+        symlinksDisabled = (boolean) conf.getOrDefault(Config.DISABLE_SYMLINKS, false);
 
         if (ops == null) {
             ops = AdvancedFSOps.make(conf);
         }
 
-        _workerId = workerId;
-        _type = type;
-        _port = port;
-        _ops = ops;
-        _conf = conf;
-        _supervisorId = supervisorId;
-        _supervisorPort = supervisorPort;
-        _resourceIsolationManager = resourceIsolationManager;
-        _assignment = assignment;
+        this.workerId = workerId;
+        this.type = type;
+        this.port = port;
+        this.ops = ops;
+        this.conf = conf;
+        this.supervisorId = supervisorId;
+        this.supervisorPort = supervisorPort;
+        this.resourceIsolationManager = resourceIsolationManager;
+        this.assignment = assignment;
 
-        if (_type.isOnlyKillable()) {
-            assert (_assignment == null);
-            assert (_port <= 0);
-            assert (_workerId != null);
-            _topologyId = null;
-            _topoConf = null;
+        if (this.type.isOnlyKillable()) {
+            assert (this.assignment == null);
+            assert (this.port <= 0);
+            assert (this.workerId != null);
+            topologyId = null;
+            this.topoConf = null;
         } else {
             assert (assignment != null);
             assert (port > 0);
-            _topologyId = assignment.get_topology_id();
-            if (!_ops.doRequiredTopoFilesExist(_conf, _topologyId)) {
+            topologyId = assignment.get_topology_id();
+            if (!this.ops.doRequiredTopoFilesExist(this.conf, topologyId)) {
                 LOG.info(
                     "Missing topology storm code, so can't launch  worker with assignment {} for this supervisor {} on port {} with id {}",
-                    _assignment,
-                    _supervisorId, _port, _workerId);
+                        this.assignment,
+                        this.supervisorId, this.port, this.workerId);
                 throw new ContainerRecoveryException("Missing required topology files...");
             }
             if (topoConf == null) {
-                _topoConf = readTopoConf();
+                this.topoConf = readTopoConf();
             } else {
                 //For testing...
-                _topoConf = topoConf;
+                this.topoConf = topoConf;
             }
         }
         this.numCleanupExceptions = metricsRegistry.registerMeter("supervisor:num-cleanup-exceptions");
@@ -166,37 +166,26 @@
 
     @Override
     public String toString() {
-        return "topo:" + _topologyId + " worker:" + _workerId;
+        return "topo:" + topologyId + " worker:" + workerId;
     }
 
     protected Map<String, Object> readTopoConf() throws IOException {
-        assert (_topologyId != null);
-        return ConfigUtils.readSupervisorStormConf(_conf, _topologyId);
+        assert (topologyId != null);
+        return ConfigUtils.readSupervisorStormConf(conf, topologyId);
     }
 
     /**
      * Kill a given process.
      *
      * @param pid the id of the process to kill
-     * @throws IOException
      */
     protected void kill(long pid) throws IOException {
         ServerUtils.killProcessWithSigTerm(String.valueOf(pid));
     }
 
-    /**
-     * Kill a given process.
-     *
-     * @param pid the id of the process to kill
-     * @throws IOException
-     */
-    protected void forceKill(long pid) throws IOException {
-        ServerUtils.forceKillProcess(String.valueOf(pid));
-    }
-
     @Override
     public void kill() throws IOException {
-        LOG.info("Killing {}:{}", _supervisorId, _workerId);
+        LOG.info("Killing {}:{}", supervisorId, workerId);
         if (shutdownTimer == null) {
             shutdownTimer = shutdownDuration.time();
         }
@@ -212,9 +201,18 @@
         }
     }
 
+    /**
+     * Kill a given process.
+     *
+     * @param pid the id of the process to kill
+     */
+    protected void forceKill(long pid) throws IOException {
+        ServerUtils.forceKillProcess(String.valueOf(pid));
+    }
+
     @Override
     public void forceKill() throws IOException {
-        LOG.info("Force Killing {}:{}", _supervisorId, _workerId);
+        LOG.info("Force Killing {}:{}", supervisorId, workerId);
         numForceKill.mark();
         try {
             Set<Long> pids = getAllPids();
@@ -236,9 +234,9 @@
      * @throws IOException on any error
      */
     public LSWorkerHeartbeat readHeartbeat() throws IOException {
-        LocalState localState = ConfigUtils.workerState(_conf, _workerId);
+        LocalState localState = ConfigUtils.workerState(conf, workerId);
         LSWorkerHeartbeat hb = localState.getWorkerHeartBeat();
-        LOG.trace("{}: Reading heartbeat {}", _workerId, hb);
+        LOG.trace("{}: Reading heartbeat {}", workerId, hb);
         return hb;
     }
 
@@ -319,7 +317,7 @@
         for (Long pid : pids) {
             LOG.debug("Checking if pid {} owner {} is alive", pid, user);
             if (!isProcessAlive(pid, user)) {
-                LOG.debug("{}: PID {} is dead", _workerId, pid);
+                LOG.debug("{}: PID {} is dead", workerId, pid);
             } else {
                 allDead = false;
                 break;
@@ -337,7 +335,7 @@
     @Override
     public void cleanUp() throws IOException {
         try (Timer.Context t = cleanupDuration.time()) {
-            containerMemoryTracker.remove(_port);
+            containerMemoryTracker.remove(port);
             cleanUpForRestart();
         } catch (IOException e) {
             //This may or may not be reported depending on when process exits
@@ -353,23 +351,23 @@
      * @throws IOException on any error
      */
     protected void setup() throws IOException {
-        _type.assertFull();
-        if (!_ops.doRequiredTopoFilesExist(_conf, _topologyId)) {
+        type.assertFull();
+        if (!ops.doRequiredTopoFilesExist(conf, topologyId)) {
             LOG.info("Missing topology storm code, so can't launch  worker with assignment {} for this supervisor {} on port {} with id {}",
-                _assignment,
-                _supervisorId, _port, _workerId);
+                    assignment,
+                    supervisorId, port, workerId);
             throw new IllegalStateException("Not all needed files are here!!!!");
         }
-        LOG.info("Setting up {}:{}", _supervisorId, _workerId);
+        LOG.info("Setting up {}:{}", supervisorId, workerId);
 
-        _ops.forceMkdir(new File(ConfigUtils.workerPidsRoot(_conf, _workerId)));
-        _ops.forceMkdir(new File(ConfigUtils.workerTmpRoot(_conf, _workerId)));
-        _ops.forceMkdir(new File(ConfigUtils.workerHeartbeatsRoot(_conf, _workerId)));
+        ops.forceMkdir(new File(ConfigUtils.workerPidsRoot(conf, workerId)));
+        ops.forceMkdir(new File(ConfigUtils.workerTmpRoot(conf, workerId)));
+        ops.forceMkdir(new File(ConfigUtils.workerHeartbeatsRoot(conf, workerId)));
 
-        File workerArtifacts = new File(ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port));
-        if (!_ops.fileExists(workerArtifacts)) {
-            _ops.forceMkdir(workerArtifacts);
-            _ops.setupWorkerArtifactsDir(_assignment.get_owner(), workerArtifacts);
+        File workerArtifacts = new File(ConfigUtils.workerArtifactsRoot(conf, topologyId, port));
+        if (!ops.fileExists(workerArtifacts)) {
+            ops.forceMkdir(workerArtifacts);
+            ops.setupWorkerArtifactsDir(assignment.get_owner(), workerArtifacts);
         }
 
         String user = getWorkerUser();
@@ -387,43 +385,43 @@
      */
     @SuppressWarnings("unchecked")
     protected void writeLogMetadata(String user) throws IOException {
-        _type.assertFull();
+        type.assertFull();
         Map<String, Object> data = new HashMap<>();
         data.put(Config.TOPOLOGY_SUBMITTER_USER, user);
-        data.put("worker-id", _workerId);
+        data.put("worker-id", workerId);
 
         Set<String> logsGroups = new HashSet<>();
-        if (_topoConf.get(DaemonConfig.LOGS_GROUPS) != null) {
-            List<String> groups = (List<String>) _topoConf.get(DaemonConfig.LOGS_GROUPS);
+        if (topoConf.get(DaemonConfig.LOGS_GROUPS) != null) {
+            List<String> groups = (List<String>) topoConf.get(DaemonConfig.LOGS_GROUPS);
             for (String group : groups) {
                 logsGroups.add(group);
             }
         }
-        if (_topoConf.get(Config.TOPOLOGY_GROUPS) != null) {
-            List<String> topGroups = (List<String>) _topoConf.get(Config.TOPOLOGY_GROUPS);
+        if (topoConf.get(Config.TOPOLOGY_GROUPS) != null) {
+            List<String> topGroups = (List<String>) topoConf.get(Config.TOPOLOGY_GROUPS);
             logsGroups.addAll(topGroups);
         }
         data.put(DaemonConfig.LOGS_GROUPS, logsGroups.toArray());
 
         Set<String> logsUsers = new HashSet<>();
-        if (_topoConf.get(DaemonConfig.LOGS_USERS) != null) {
-            List<String> logUsers = (List<String>) _topoConf.get(DaemonConfig.LOGS_USERS);
+        if (topoConf.get(DaemonConfig.LOGS_USERS) != null) {
+            List<String> logUsers = (List<String>) topoConf.get(DaemonConfig.LOGS_USERS);
             for (String logUser : logUsers) {
                 logsUsers.add(logUser);
             }
         }
-        if (_topoConf.get(Config.TOPOLOGY_USERS) != null) {
-            List<String> topUsers = (List<String>) _topoConf.get(Config.TOPOLOGY_USERS);
+        if (topoConf.get(Config.TOPOLOGY_USERS) != null) {
+            List<String> topUsers = (List<String>) topoConf.get(Config.TOPOLOGY_USERS);
             for (String logUser : topUsers) {
                 logsUsers.add(logUser);
             }
         }
         data.put(DaemonConfig.LOGS_USERS, logsUsers.toArray());
 
-        File file = ServerConfigUtils.getLogMetaDataFile(_conf, _topologyId, _port);
+        File file = ServerConfigUtils.getLogMetaDataFile(conf, topologyId, port);
 
         Yaml yaml = new Yaml();
-        try (Writer writer = _ops.getWriter(file)) {
+        try (Writer writer = ops.getWriter(file)) {
             yaml.dump(data, writer);
         }
     }
@@ -434,13 +432,13 @@
      * @throws IOException on any error
      */
     protected void createArtifactsLink() throws IOException {
-        _type.assertFull();
-        if (!_symlinksDisabled) {
-            File workerDir = new File(ConfigUtils.workerRoot(_conf, _workerId));
-            File topoDir = new File(ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port));
-            if (_ops.fileExists(workerDir)) {
-                LOG.debug("Creating symlinks for worker-id: {} topology-id: {} to its port artifacts directory", _workerId, _topologyId);
-                _ops.createSymlink(new File(workerDir, "artifacts"), topoDir);
+        type.assertFull();
+        if (!symlinksDisabled) {
+            File workerDir = new File(ConfigUtils.workerRoot(conf, workerId));
+            File topoDir = new File(ConfigUtils.workerArtifactsRoot(conf, topologyId, port));
+            if (ops.fileExists(workerDir)) {
+                LOG.debug("Creating symlinks for worker-id: {} topology-id: {} to its port artifacts directory", workerId, topologyId);
+                ops.createSymlink(new File(workerDir, "artifacts"), topoDir);
             }
         }
     }
@@ -451,12 +449,12 @@
      * @throws IOException on any error.
      */
     protected void createBlobstoreLinks() throws IOException {
-        _type.assertFull();
-        String stormRoot = ConfigUtils.supervisorStormDistRoot(_conf, _topologyId);
-        String workerRoot = ConfigUtils.workerRoot(_conf, _workerId);
+        type.assertFull();
+        String stormRoot = ConfigUtils.supervisorStormDistRoot(conf, topologyId);
+        String workerRoot = ConfigUtils.workerRoot(conf, workerId);
 
         @SuppressWarnings("unchecked")
-        Map<String, Map<String, Object>> blobstoreMap = (Map<String, Map<String, Object>>) _topoConf.get(Config.TOPOLOGY_BLOBSTORE_MAP);
+        Map<String, Map<String, Object>> blobstoreMap = (Map<String, Map<String, Object>>) topoConf.get(Config.TOPOLOGY_BLOBSTORE_MAP);
         List<String> blobFileNames = new ArrayList<>();
         if (blobstoreMap != null) {
             for (Map.Entry<String, Map<String, Object>> entry : blobstoreMap.entrySet()) {
@@ -478,17 +476,17 @@
         }
         resourceFileNames.addAll(blobFileNames);
 
-        if (!_symlinksDisabled) {
-            LOG.info("Creating symlinks for worker-id: {} storm-id: {} for files({}): {}", _workerId, _topologyId, resourceFileNames.size(),
+        if (!symlinksDisabled) {
+            LOG.info("Creating symlinks for worker-id: {} storm-id: {} for files({}): {}", workerId, topologyId, resourceFileNames.size(),
                 resourceFileNames);
             if (targetResourcesDir.exists()) {
-                _ops.createSymlink(new File(workerRoot, ServerConfigUtils.RESOURCES_SUBDIR), targetResourcesDir);
+                ops.createSymlink(new File(workerRoot, ServerConfigUtils.RESOURCES_SUBDIR), targetResourcesDir);
             } else {
-                LOG.info("Topology jar for worker-id: {} storm-id: {} does not contain re sources directory {}.", _workerId, _topologyId,
+                LOG.info("Topology jar for worker-id: {} storm-id: {} does not contain resources directory {}.", workerId, topologyId,
                     targetResourcesDir.toString());
             }
             for (String fileName : blobFileNames) {
-                _ops.createSymlink(new File(workerRoot, fileName),
+                ops.createSymlink(new File(workerRoot, fileName),
                     new File(stormRoot, fileName));
             }
         } else if (blobFileNames.size() > 0) {
@@ -497,16 +495,17 @@
     }
 
     /**
+     * Get all PIDs.
      * @return all of the pids that are a part of this container.
      */
     protected Set<Long> getAllPids() throws IOException {
         Set<Long> ret = new HashSet<>();
-        for (String listing : ConfigUtils.readDirContents(ConfigUtils.workerPidsRoot(_conf, _workerId))) {
+        for (String listing : ConfigUtils.readDirContents(ConfigUtils.workerPidsRoot(conf, workerId))) {
             ret.add(Long.valueOf(listing));
         }
 
-        if (_resourceIsolationManager != null) {
-            Set<Long> morePids = _resourceIsolationManager.getRunningPids(_workerId);
+        if (resourceIsolationManager != null) {
+            Set<Long> morePids = resourceIsolationManager.getRunningPids(workerId);
             assert (morePids != null);
             ret.addAll(morePids);
         }
@@ -515,34 +514,35 @@
     }
 
     /**
+     * Get worker user.
      * @return the user that some operations should be done as.
      *
      * @throws IOException on any error
      */
     protected String getWorkerUser() throws IOException {
-        LOG.info("GET worker-user for {}", _workerId);
-        File file = new File(ConfigUtils.workerUserFile(_conf, _workerId));
+        LOG.info("GET worker-user for {}", workerId);
+        File file = new File(ConfigUtils.workerUserFile(conf, workerId));
 
-        if (_ops.fileExists(file)) {
-            return _ops.slurpString(file).trim();
-        } else if (_assignment != null && _assignment.is_set_owner()) {
-            return _assignment.get_owner();
+        if (ops.fileExists(file)) {
+            return ops.slurpString(file).trim();
+        } else if (assignment != null && assignment.is_set_owner()) {
+            return assignment.get_owner();
         }
-        if (ConfigUtils.isLocalMode(_conf)) {
+        if (ConfigUtils.isLocalMode(conf)) {
             return System.getProperty("user.name");
         } else {
-            File f = new File(ConfigUtils.workerArtifactsRoot(_conf));
+            File f = new File(ConfigUtils.workerArtifactsRoot(conf));
             if (f.exists()) {
                 return Files.getOwner(f.toPath()).getName();
             }
-            throw new IllegalStateException("Could not recover the user for " + _workerId);
+            throw new IllegalStateException("Could not recover the user for " + workerId);
         }
     }
 
     /**
      * Returns the user that the worker process is running as.
      *
-     * The default behavior is to launch the worker as the user supervisor is running as (e.g. 'storm')
+     * <p>The default behavior is to launch the worker as the user supervisor is running as (e.g. 'storm')
      *
      * @return the user that the worker process is running as.
      */
@@ -551,14 +551,14 @@
     }
 
     protected void saveWorkerUser(String user) throws IOException {
-        _type.assertFull();
-        LOG.info("SET worker-user {} {}", _workerId, user);
-        _ops.dump(new File(ConfigUtils.workerUserFile(_conf, _workerId)), user);
+        type.assertFull();
+        LOG.info("SET worker-user {} {}", workerId, user);
+        ops.dump(new File(ConfigUtils.workerUserFile(conf, workerId)), user);
     }
 
     protected void deleteSavedWorkerUser() throws IOException {
-        LOG.info("REMOVE worker-user {}", _workerId);
-        _ops.deleteIfExists(new File(ConfigUtils.workerUserFile(_conf, _workerId)));
+        LOG.info("REMOVE worker-user {}", workerId);
+        ops.deleteIfExists(new File(ConfigUtils.workerUserFile(conf, workerId)));
     }
 
     /**
@@ -568,28 +568,28 @@
      * @throws IOException on any error
      */
     public void cleanUpForRestart() throws IOException {
-        LOG.info("Cleaning up {}:{}", _supervisorId, _workerId);
+        LOG.info("Cleaning up {}:{}", supervisorId, workerId);
         Set<Long> pids = getAllPids();
         String user = getWorkerUser();
 
         for (Long pid : pids) {
-            File path = new File(ConfigUtils.workerPidPath(_conf, _workerId, pid));
-            _ops.deleteIfExists(path, user, _workerId);
+            File path = new File(ConfigUtils.workerPidPath(conf, workerId, pid));
+            ops.deleteIfExists(path, user, workerId);
         }
 
         //clean up for resource isolation if enabled
-        if (_resourceIsolationManager != null) {
-            _resourceIsolationManager.releaseResourcesForWorker(_workerId);
+        if (resourceIsolationManager != null) {
+            resourceIsolationManager.releaseResourcesForWorker(workerId);
         }
 
         //Always make sure to clean up everything else before worker directory
         //is removed since that is what is going to trigger the retry for cleanup
-        _ops.deleteIfExists(new File(ConfigUtils.workerHeartbeatsRoot(_conf, _workerId)), user, _workerId);
-        _ops.deleteIfExists(new File(ConfigUtils.workerPidsRoot(_conf, _workerId)), user, _workerId);
-        _ops.deleteIfExists(new File(ConfigUtils.workerTmpRoot(_conf, _workerId)), user, _workerId);
-        _ops.deleteIfExists(new File(ConfigUtils.workerRoot(_conf, _workerId)), user, _workerId);
+        ops.deleteIfExists(new File(ConfigUtils.workerHeartbeatsRoot(conf, workerId)), user, workerId);
+        ops.deleteIfExists(new File(ConfigUtils.workerPidsRoot(conf, workerId)), user, workerId);
+        ops.deleteIfExists(new File(ConfigUtils.workerTmpRoot(conf, workerId)), user, workerId);
+        ops.deleteIfExists(new File(ConfigUtils.workerRoot(conf, workerId)), user, workerId);
         deleteSavedWorkerUser();
-        _workerId = null;
+        workerId = null;
     }
 
     /**
@@ -604,11 +604,11 @@
     }
 
     protected void updateMemoryAccounting() {
-        _type.assertFull();
+        type.assertFull();
         long used = getMemoryUsageMb();
         long reserved = getMemoryReservationMb();
-        containerMemoryTracker.setUsedMemoryMb(_port, _topologyId, used);
-        containerMemoryTracker.setReservedMemoryMb(_port, _topologyId, reserved);
+        containerMemoryTracker.setUsedMemoryMb(port, topologyId, used);
+        containerMemoryTracker.setReservedMemoryMb(port, topologyId, reserved);
     }
 
     /**
@@ -616,7 +616,7 @@
      */
     public long getTotalTopologyMemoryUsed() {
         updateMemoryAccounting();
-        return containerMemoryTracker.getUsedMemoryMb(_topologyId);
+        return containerMemoryTracker.getUsedMemoryMb(topologyId);
     }
 
     /**
@@ -628,7 +628,7 @@
     public long getTotalTopologyMemoryReserved(LocalAssignment withUpdatedLimits) {
         updateMemoryAccounting();
         long ret =
-            containerMemoryTracker.getReservedMemoryMb(_topologyId);
+            containerMemoryTracker.getReservedMemoryMb(topologyId);
         if (withUpdatedLimits.is_set_total_node_shared()) {
             ret += withUpdatedLimits.get_total_node_shared();
         }
@@ -639,7 +639,7 @@
      * Get the number of workers for this topology.
      */
     public long getTotalWorkersForThisTopology() {
-        return containerMemoryTracker.getAssignedWorkerCount(_topologyId);
+        return containerMemoryTracker.getAssignedWorkerCount(topologyId);
     }
 
     /**
@@ -691,7 +691,7 @@
      * Get the id of the container or null if there is no worker id right now.
      */
     public String getWorkerId() {
-        return _workerId;
+        return workerId;
     }
 
     /**
@@ -699,7 +699,7 @@
      */
     void processMetrics(OnlyLatestExecutor<Integer> exec, WorkerMetricsProcessor processor) {
         try {
-            Optional<Long> usedMemoryForPort = containerMemoryTracker.getUsedMemoryMb(_port);
+            Optional<Long> usedMemoryForPort = containerMemoryTracker.getUsedMemoryMb(port);
             if (usedMemoryForPort.isPresent()) {
                 // Make sure we don't process too frequently.
                 long nextMetricProcessTime = this.lastMetricProcessTime + 60L * 1000L;
@@ -712,16 +712,19 @@
 
                 // create metric for memory
                 long timestamp = System.currentTimeMillis();
-                WorkerMetricPoint workerMetric = new WorkerMetricPoint(MEMORY_USED_METRIC, timestamp, usedMemoryForPort.get(), SYSTEM_COMPONENT_ID,
-                    INVALID_EXECUTOR_ID, INVALID_STREAM_ID);
+                WorkerMetricPoint workerMetric = new WorkerMetricPoint(MEMORY_USED_METRIC,
+                        timestamp,
+                        usedMemoryForPort.get(),
+                        SYSTEM_COMPONENT_ID,
+                        INVALID_EXECUTOR_ID, INVALID_STREAM_ID);
 
                 WorkerMetricList metricList = new WorkerMetricList();
                 metricList.add_to_metrics(workerMetric);
-                WorkerMetrics metrics = new WorkerMetrics(_topologyId, _port, hostname, metricList);
+                WorkerMetrics metrics = new WorkerMetrics(topologyId, port, hostname, metricList);
 
-                exec.execute(_port, () -> {
+                exec.execute(port, () -> {
                     try {
-                        processor.processWorkerMetrics(_conf, metrics);
+                        processor.processWorkerMetrics(conf, metrics);
                     } catch (MetricException e) {
                         LOG.error("Failed to process metrics", e);
                     }
@@ -734,31 +737,31 @@
         }
     }
 
-    public static enum ContainerType {
+    public enum ContainerType {
         LAUNCH(false, false),
         RECOVER_FULL(true, false),
         RECOVER_PARTIAL(true, true);
 
-        private final boolean _recovery;
-        private final boolean _onlyKillable;
+        private final boolean recovery;
+        private final boolean onlyKillable;
 
         ContainerType(boolean recovery, boolean onlyKillable) {
-            _recovery = recovery;
-            _onlyKillable = onlyKillable;
+            this.recovery = recovery;
+            this.onlyKillable = onlyKillable;
         }
 
         public boolean isRecovery() {
-            return _recovery;
+            return recovery;
         }
 
         public void assertFull() {
-            if (_onlyKillable) {
+            if (onlyKillable) {
                 throw new IllegalStateException("Container is only Killable.");
             }
         }
 
         public boolean isOnlyKillable() {
-            return _onlyKillable;
+            return onlyKillable;
         }
     }
 }
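
// A minimal sketch of the ContainerType contract defined above: RECOVER_PARTIAL marks a
// container whose on-disk state could not be fully recovered, so only kill-style operations
// are safe, and assertFull() guards everything else (for example, saveWorkerUser() above).
ContainerType type = ContainerType.RECOVER_PARTIAL;
assert type.isRecovery();        // true: re-attaching to a pre-existing worker
assert type.isOnlyKillable();    // true: only kill/cleanup operations are allowed
try {
    type.assertFull();           // throws IllegalStateException("Container is only Killable.")
} catch (IllegalStateException expected) {
    // reached whenever a full-container operation is attempted on a partial recovery
}
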
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerLauncher.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerLauncher.java
index b310018..d5ae161 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerLauncher.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerLauncher.java
@@ -28,7 +28,7 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * Launches containers
+ * Launches containers.
  */
 public abstract class ContainerLauncher {
     private static final Logger LOG = LoggerFactory.getLogger(ContainerLauncher.class);
@@ -76,7 +76,7 @@
     }
 
     /**
-     * Launch a container in a given slot
+     * Launch a container in a given slot.
      * @param port the port to run this on
      * @param assignment what to launch
      * @param state the current state of the supervisor
@@ -86,7 +86,7 @@
     public abstract Container launchContainer(int port, LocalAssignment assignment, LocalState state) throws IOException;
 
     /**
-     * Recover a container for a running process
+     * Recover a container for a running process.
      * @param port the port the assignment is running on
      * @param assignment the assignment that was launched
      * @param state the current state of the supervisor
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerMemoryTracker.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerMemoryTracker.java
index b5fc1f8..e79794e 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerMemoryTracker.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ContainerMemoryTracker.java
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.storm.daemon.supervisor;
 
 import java.util.Optional;
@@ -21,17 +22,14 @@
 
 public class ContainerMemoryTracker {
 
-    private final ConcurrentHashMap<Integer, TopoAndMemory> usedMemory =
-        new ConcurrentHashMap<>();
-    private final ConcurrentHashMap<Integer, TopoAndMemory> reservedMemory =
-        new ConcurrentHashMap<>();
+    private final ConcurrentHashMap<Integer, TopoAndMemory> usedMemory = new ConcurrentHashMap<>();
+    private final ConcurrentHashMap<Integer, TopoAndMemory> reservedMemory = new ConcurrentHashMap<>();
 
     public ContainerMemoryTracker(StormMetricsRegistry metricsRegistry) {
         metricsRegistry.registerGauge(
             "supervisor:current-used-memory-mb",
             () -> {
-                Long val =
-                usedMemory.values().stream().mapToLong((topoAndMem) -> topoAndMem.memory).sum();
+                Long val = usedMemory.values().stream().mapToLong((topoAndMem) -> topoAndMem.memory).sum();
                 int ret = val.intValue();
                 if (val > Integer.MAX_VALUE) { // Would only happen at 2 PB so we are OK for now
                     ret = Integer.MAX_VALUE;
@@ -41,8 +39,7 @@
         metricsRegistry.registerGauge(
             "supervisor:current-reserved-memory-mb",
             () -> {
-                Long val =
-                reservedMemory.values().stream().mapToLong((topoAndMem) -> topoAndMem.memory).sum();
+                Long val = reservedMemory.values().stream().mapToLong((topoAndMem) -> topoAndMem.memory).sum();
                 int ret = val.intValue();
                 if (val > Integer.MAX_VALUE) { // Would only happen at 2 PB so we are OK for now
                     ret = Integer.MAX_VALUE;
@@ -146,7 +143,7 @@
         public final String topoId;
         public final long memory;
 
-        public TopoAndMemory(String id, long mem) {
+        TopoAndMemory(String id, long mem) {
             topoId = id;
             memory = mem;
         }
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Killable.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Killable.java
index 420f277..78fc2de 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Killable.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Killable.java
@@ -21,25 +21,26 @@
      * kill -15 equivalent
      * @throws IOException on any error
      */
-    public void kill() throws IOException;
+    void kill() throws IOException;
 
     /**
      * Kill the processes in this container violently.
      * kill -9 equivalent
      * @throws IOException on any error
      */
-    public void forceKill() throws IOException;
+    void forceKill() throws IOException;
 
     /**
+     * Check whether all processes are dead.
      * @return true if all of the processes are dead, else false
      * @throws IOException on any error
      */
-    public boolean areAllProcessesDead() throws IOException;
+    boolean areAllProcessesDead() throws IOException;
 
     /**
      * Clean up the container. It is not coming back.
      * By default this does the same thing as when restarting.
      * @throws IOException on any error
      */
-    public void cleanUp() throws IOException;
+    void cleanUp() throws IOException;
 }
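
// A sketch of how a caller might drive the Killable contract above: attempt a graceful
// kill first, poll areAllProcessesDead(), then escalate to forceKill() before cleanUp().
// The retry count and sleep interval below are illustrative assumptions, not supervisor code.
static void shutDown(Killable killable) throws IOException, InterruptedException {
    killable.kill();                                   // kill -15 equivalent
    for (int i = 0; i < 10 && !killable.areAllProcessesDead(); i++) {
        Thread.sleep(1_000);
    }
    if (!killable.areAllProcessesDead()) {
        killable.forceKill();                          // kill -9 equivalent
    }
    killable.cleanUp();                                // the container is not coming back
}
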
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainer.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainer.java
index 228da84..b74bbf9 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainer.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainer.java
@@ -26,9 +26,9 @@
 
 public class LocalContainer extends Container {
     private static final Logger LOG = LoggerFactory.getLogger(LocalContainer.class);
-    private final IContext _sharedContext;
+    private final IContext sharedContext;
     private final org.apache.storm.generated.Supervisor.Iface localSupervisor;
-    private volatile boolean _isAlive = false;
+    private volatile boolean isAlive = false;
 
     public LocalContainer(Map<String, Object> conf, String supervisorId, int supervisorPort, int port,
                           LocalAssignment assignment, IContext sharedContext, StormMetricsRegistry metricsRegistry,
@@ -36,8 +36,8 @@
                           org.apache.storm.generated.Supervisor.Iface localSupervisor) throws IOException {
         super(ContainerType.LAUNCH, conf, supervisorId, supervisorPort, port, assignment, null, null, null, null, metricsRegistry, 
             containerMemoryTracker);
-        _sharedContext = sharedContext;
-        _workerId = Utils.uuid();
+        this.sharedContext = sharedContext;
+        workerId = Utils.uuid();
         this.localSupervisor = localSupervisor;
     }
 
@@ -53,7 +53,7 @@
 
     @Override
     public void launch() throws IOException {
-        Worker worker = new Worker(_conf, _sharedContext, _topologyId, _supervisorId, _supervisorPort, _port, _workerId,
+        Worker worker = new Worker(conf, sharedContext, topologyId, supervisorId, supervisorPort, port, workerId,
             () -> {
                 return () -> localSupervisor;
             });
@@ -63,21 +63,21 @@
             throw new IOException(e);
         }
         saveWorkerUser(System.getProperty("user.name"));
-        ProcessSimulator.registerProcess(_workerId, worker);
-        _isAlive = true;
+        ProcessSimulator.registerProcess(workerId, worker);
+        isAlive = true;
     }
 
     @Override
     public void kill() throws IOException {
-        ProcessSimulator.killProcess(_workerId);
-        _isAlive = false;
+        ProcessSimulator.killProcess(workerId);
+        isAlive = false;
         //Make sure the worker is down before we try to shoot any child processes
         super.kill();
     }
 
     @Override
     public boolean areAllProcessesDead() throws IOException {
-        return !_isAlive && super.areAllProcessesDead();
+        return !isAlive && super.areAllProcessesDead();
     }
 
     @Override
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainerLauncher.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainerLauncher.java
index d9f3f8d..5434c77 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainerLauncher.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/LocalContainerLauncher.java
@@ -23,10 +23,10 @@
  * Launch Containers in local mode.
  */
 public class LocalContainerLauncher extends ContainerLauncher {
-    private final Map<String, Object> _conf;
-    private final String _supervisorId;
-    private final int _supervisorPort;
-    private final IContext _sharedContext;
+    private final Map<String, Object> conf;
+    private final String supervisorId;
+    private final int supervisorPort;
+    private final IContext sharedContext;
     private final StormMetricsRegistry metricsRegistry;
     private final ContainerMemoryTracker containerMemoryTracker;
     private final org.apache.storm.generated.Supervisor.Iface localSupervisor;
@@ -35,10 +35,10 @@
                                   IContext sharedContext, StormMetricsRegistry metricsRegistry, 
                                   ContainerMemoryTracker containerMemoryTracker,
                                   org.apache.storm.generated.Supervisor.Iface localSupervisor) {
-        _conf = conf;
-        _supervisorId = supervisorId;
-        _supervisorPort = supervisorPort;
-        _sharedContext = sharedContext;
+        this.conf = conf;
+        this.supervisorId = supervisorId;
+        this.supervisorPort = supervisorPort;
+        this.sharedContext = sharedContext;
         this.metricsRegistry = metricsRegistry;
         this.containerMemoryTracker = containerMemoryTracker;
         this.localSupervisor = localSupervisor;
@@ -46,8 +46,8 @@
 
     @Override
     public Container launchContainer(int port, LocalAssignment assignment, LocalState state) throws IOException {
-        LocalContainer ret = new LocalContainer(_conf, _supervisorId, _supervisorPort,
-            port, assignment, _sharedContext, metricsRegistry, containerMemoryTracker, localSupervisor);
+        LocalContainer ret = new LocalContainer(conf, supervisorId, supervisorPort,
+            port, assignment, sharedContext, metricsRegistry, containerMemoryTracker, localSupervisor);
         ret.setup();
         ret.launch();
         return ret;
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/OnlyLatestExecutor.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/OnlyLatestExecutor.java
index a0d35d9..81f3818 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/OnlyLatestExecutor.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/OnlyLatestExecutor.java
@@ -39,7 +39,7 @@
     }
 
     /**
-     * Run something in the future, but replace it with the latest if it is taking too long
+     * Run something in the future, but replace it with the latest if it is taking too long.
      *
      * @param key what to use to dedupe things.
      * @param r   what you want to run.
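
// A sketch of the semantics documented above, assuming OnlyLatestExecutor wraps a plain
// java.util.concurrent.Executor (that constructor is an assumption). When a task for a key
// is still waiting as a newer one arrives, the older task is replaced, so per key only the
// latest runnable actually runs.
OnlyLatestExecutor<Integer> exec =
    new OnlyLatestExecutor<>(java.util.concurrent.Executors.newSingleThreadExecutor());
exec.execute(6700, () -> System.out.println("stale metrics for port 6700"));
exec.execute(6700, () -> System.out.println("latest metrics for port 6700"));
// If the first runnable has not started yet, only the "latest" message is printed.
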
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ReadClusterState.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ReadClusterState.java
index 6b18bbe..7337927 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ReadClusterState.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/ReadClusterState.java
@@ -62,7 +62,7 @@
     private final AtomicInteger readRetry = new AtomicInteger(0);
     private final String assignmentId;
     private final int supervisorPort;
-    private final ISupervisor iSuper;
+    private final ISupervisor supervisor;
     private final AsyncLocalizer localizer;
     private final ContainerLauncher launcher;
     private final String host;
@@ -77,7 +77,7 @@
         this.stormClusterState = supervisor.getStormClusterState();
         this.assignmentId = supervisor.getAssignmentId();
         this.supervisorPort = supervisor.getThriftServerPort();
-        this.iSuper = supervisor.getiSupervisor();
+        this.supervisor = supervisor.getiSupervisor();
         this.localizer = supervisor.getAsyncLocalizer();
         this.host = supervisor.getHostName();
         this.localState = supervisor.getLocalState();
@@ -126,7 +126,7 @@
 
     private Slot mkSlot(int port) throws Exception {
         return new Slot(localizer, superConf, launcher, host, port,
-                        localState, stormClusterState, iSuper, cachedAssignments, metricsExec, metricsProcessor, slotMetrics);
+                        localState, stormClusterState, supervisor, cachedAssignments, metricsExec, metricsProcessor, slotMetrics);
     }
 
     @Override
@@ -147,12 +147,12 @@
             LOG.debug("All assignment: {}", allAssignments);
             LOG.debug("Topology Ids -> Profiler Actions {}", topoIdToProfilerActions);
             for (Integer port : allAssignments.keySet()) {
-                if (iSuper.confirmAssigned(port)) {
+                if (supervisor.confirmAssigned(port)) {
                     assignedPorts.add(port);
                 }
             }
             HashSet<Integer> allPorts = new HashSet<>(assignedPorts);
-            iSuper.assigned(allPorts);
+            supervisor.assigned(allPorts);
             allPorts.addAll(slots.keySet());
 
             Map<Integer, Set<TopoProfileAction>> filtered = new HashMap<>();
@@ -207,7 +207,7 @@
 
     protected Map<Integer, LocalAssignment> readAssignments(Map<String, Assignment> assignmentsSnapshot) {
         try {
-            Map<Integer, LocalAssignment> portLA = new HashMap<>();
+            Map<Integer, LocalAssignment> portLocalAssignment = new HashMap<>();
             for (Map.Entry<String, Assignment> assignEntry : assignmentsSnapshot.entrySet()) {
                 String topoId = assignEntry.getKey();
                 Assignment assignment = assignEntry.getValue();
@@ -220,16 +220,16 @@
 
                     LocalAssignment la = entry.getValue();
 
-                    if (!portLA.containsKey(port)) {
-                        portLA.put(port, la);
+                    if (!portLocalAssignment.containsKey(port)) {
+                        portLocalAssignment.put(port, la);
                     } else {
                         throw new RuntimeException("Should not have multiple topologies assigned to one port "
-                                                   + port + " " + la + " " + portLA);
+                                                   + port + " " + la + " " + portLocalAssignment);
                     }
                 }
             }
             readRetry.set(0);
-            return portLA;
+            return portLocalAssignment;
         } catch (RuntimeException e) {
             if (readRetry.get() > 2) {
                 throw e;
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainer.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainer.java
index dae4826..b2681ea 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainer.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainer.java
@@ -60,7 +60,7 @@
         List<String> commands = Arrays.asList("signal", String.valueOf(pid), String.valueOf(signal));
         String user = getWorkerUser();
         String logPrefix = "kill -" + signal + " " + pid;
-        ClientSupervisorUtils.processLauncherAndWait(_conf, user, commands, null, logPrefix);
+        ClientSupervisorUtils.processLauncherAndWait(conf, user, commands, null, logPrefix);
     }
 
     @Override
@@ -81,15 +81,15 @@
         LOG.info("Running as user: {} command: {}", user, command);
         String containerFile = ServerUtils.containerFilePath(td);
         if (Utils.checkFileExists(containerFile)) {
-            SupervisorUtils.rmrAsUser(_conf, containerFile, containerFile);
+            SupervisorUtils.rmrAsUser(conf, containerFile, containerFile);
         }
         String scriptFile = ServerUtils.scriptFilePath(td);
         if (Utils.checkFileExists(scriptFile)) {
-            SupervisorUtils.rmrAsUser(_conf, scriptFile, scriptFile);
+            SupervisorUtils.rmrAsUser(conf, scriptFile, scriptFile);
         }
         String script = ServerUtils.writeScript(td, command, env);
         List<String> args = Arrays.asList("profiler", td, script);
-        int ret = ClientSupervisorUtils.processLauncherAndWait(_conf, user, args, env, logPrefix);
+        int ret = ClientSupervisorUtils.processLauncherAndWait(conf, user, args, env, logPrefix);
         return ret == 0;
     }
 
@@ -100,10 +100,10 @@
         String user = this.getWorkerUser();
         List<String> args = Arrays.asList("worker", workerDir, ServerUtils.writeScript(workerDir, command, env));
         List<String> commandPrefix = null;
-        if (_resourceIsolationManager != null) {
-            commandPrefix = _resourceIsolationManager.getLaunchCommandPrefix(_workerId);
+        if (resourceIsolationManager != null) {
+            commandPrefix = resourceIsolationManager.getLaunchCommandPrefix(workerId);
         }
-        ClientSupervisorUtils.processLauncher(_conf, user, commandPrefix, args, null, logPrefix, processExitCallback, targetDir);
+        ClientSupervisorUtils.processLauncher(conf, user, commandPrefix, args, null, logPrefix, processExitCallback, targetDir);
     }
 
     /**
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainerLauncher.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainerLauncher.java
index 2e7aa9c..7706662 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainerLauncher.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/RunAsUserContainerLauncher.java
@@ -21,28 +21,28 @@
 import org.apache.storm.utils.LocalState;
 
 public class RunAsUserContainerLauncher extends ContainerLauncher {
-    protected final ResourceIsolationInterface _resourceIsolationManager;
-    private final Map<String, Object> _conf;
-    private final String _supervisorId;
-    private final int _supervisorPort;
+    protected final ResourceIsolationInterface resourceIsolationManager;
+    private final Map<String, Object> conf;
+    private final String supervisorId;
+    private final int supervisorPort;
     private final StormMetricsRegistry metricsRegistry;
     private final ContainerMemoryTracker containerMemoryTracker;
 
     public RunAsUserContainerLauncher(Map<String, Object> conf, String supervisorId, int supervisorPort,
                                       ResourceIsolationInterface resourceIsolationManager, StormMetricsRegistry metricsRegistry, 
                                       ContainerMemoryTracker containerMemoryTracker) throws IOException {
-        _conf = conf;
-        _supervisorId = supervisorId;
-        _supervisorPort = supervisorPort;
-        _resourceIsolationManager = resourceIsolationManager;
+        this.conf = conf;
+        this.supervisorId = supervisorId;
+        this.supervisorPort = supervisorPort;
+        this.resourceIsolationManager = resourceIsolationManager;
         this.metricsRegistry = metricsRegistry;
         this.containerMemoryTracker = containerMemoryTracker;
     }
 
     @Override
     public Container launchContainer(int port, LocalAssignment assignment, LocalState state) throws IOException {
-        Container container = new RunAsUserContainer(ContainerType.LAUNCH, _conf, _supervisorId, _supervisorPort, port,
-            assignment, _resourceIsolationManager, state, null, metricsRegistry, containerMemoryTracker, null, null, null);
+        Container container = new RunAsUserContainer(ContainerType.LAUNCH, conf, supervisorId, supervisorPort, port,
+            assignment, resourceIsolationManager, state, null, metricsRegistry, containerMemoryTracker, null, null, null);
         container.setup();
         container.launch();
         return container;
@@ -50,15 +50,15 @@
 
     @Override
     public Container recoverContainer(int port, LocalAssignment assignment, LocalState state) throws IOException {
-        return new RunAsUserContainer(ContainerType.RECOVER_FULL, _conf, _supervisorId, _supervisorPort, port,
-            assignment, _resourceIsolationManager, state, null, metricsRegistry, containerMemoryTracker,
+        return new RunAsUserContainer(ContainerType.RECOVER_FULL, conf, supervisorId, supervisorPort, port,
+            assignment, resourceIsolationManager, state, null, metricsRegistry, containerMemoryTracker,
             null, null, null);
     }
 
     @Override
     public Killable recoverContainer(String workerId, LocalState localState) throws IOException {
-        return new RunAsUserContainer(ContainerType.RECOVER_PARTIAL, _conf, _supervisorId, _supervisorPort, -1, null,
-            _resourceIsolationManager, localState, workerId, metricsRegistry, containerMemoryTracker,
+        return new RunAsUserContainer(ContainerType.RECOVER_PARTIAL, conf, supervisorId, supervisorPort, -1, null,
+                resourceIsolationManager, localState, workerId, metricsRegistry, containerMemoryTracker,
             null, null, null);
     }
 
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Slot.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Slot.java
index 60d5e61..c8e6f19 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Slot.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Slot.java
@@ -12,8 +12,6 @@
 
 package org.apache.storm.daemon.supervisor;
 
-import com.codahale.metrics.Meter;
-import com.codahale.metrics.Timer;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
@@ -42,7 +40,6 @@
 import org.apache.storm.localizer.BlobChangingCallback;
 import org.apache.storm.localizer.GoodToGo;
 import org.apache.storm.localizer.LocallyCachedBlob;
-import org.apache.storm.metric.StormMetricsRegistry;
 import org.apache.storm.metricstore.WorkerMetricsProcessor;
 import org.apache.storm.scheduler.ISupervisor;
 import org.apache.storm.shade.com.google.common.annotations.VisibleForTesting;
@@ -83,7 +80,7 @@
                 ContainerLauncher containerLauncher, String host,
                 int port, LocalState localState,
                 IStormClusterState clusterState,
-                ISupervisor iSupervisor,
+                ISupervisor supervisor,
                 AtomicReference<Map<Long, LocalAssignment>> cachedCurrentAssignments,
                 OnlyLatestExecutor<Integer> metricsExec,
                 WorkerMetricsProcessor metricsProcessor,
@@ -100,7 +97,7 @@
                 containerLauncher,
                 host,
                 port,
-                iSupervisor,
+                supervisor,
                 localState,
                 this,
                 metricsExec, metricsProcessor, slotMetrics);
@@ -179,27 +176,27 @@
     /**
      * Decide the equivalence of two local assignments, ignoring the order of executors.
      * This is different from the #equals method.
-     * @param a Local assignment A
-     * @param b Local assignment B
+     * @param first Local assignment A
+     * @param second Local assignment B
      * @return True if A and B are equivalent, ignoring the order of the executors
      */
     @VisibleForTesting
-    static boolean equivalent(LocalAssignment a, LocalAssignment b) {
-        if (a == null && b == null) {
+    static boolean equivalent(LocalAssignment first, LocalAssignment second) {
+        if (first == null && second == null) {
             return true;
         }
-        if (a != null && b != null) {
-            if (a.get_topology_id().equals(b.get_topology_id())) {
-                Set<ExecutorInfo> aexec = new HashSet<>(a.get_executors());
-                Set<ExecutorInfo> bexec = new HashSet<>(b.get_executors());
+        if (first != null && second != null) {
+            if (first.get_topology_id().equals(second.get_topology_id())) {
+                Set<ExecutorInfo> aexec = new HashSet<>(first.get_executors());
+                Set<ExecutorInfo> bexec = new HashSet<>(second.get_executors());
                 if (aexec.equals(bexec)) {
-                    boolean aHasResources = a.is_set_resources();
-                    boolean bHasResources = b.is_set_resources();
-                    if (!aHasResources && !bHasResources) {
+                    boolean firstHasResources = first.is_set_resources();
+                    boolean secondHasResources = second.is_set_resources();
+                    if (!firstHasResources && !secondHasResources) {
                         return true;
                     }
-                    if (aHasResources && bHasResources) {
-                        if (a.get_resources().equals(b.get_resources())) {
+                    if (firstHasResources && secondHasResources) {
+                        if (first.get_resources().equals(second.get_resources())) {
                             return true;
                         }
                     }
@@ -241,7 +238,8 @@
      * @return the next state
      * @throws IOException on any error
      */
-    private static DynamicState prepareForNewAssignmentNoWorkersRunning(DynamicState dynamicState, StaticState staticState) throws IOException {
+    private static DynamicState prepareForNewAssignmentNoWorkersRunning(DynamicState dynamicState,
+            StaticState staticState) throws IOException {
         assert (dynamicState.container == null);
         assert dynamicState.currentAssignment == null;
 
@@ -264,7 +262,7 @@
         Boolean isDead = dynamicState.container.areAllProcessesDead();
         if (!isDead) {
             if (reason == KillReason.ASSIGNMENT_CHANGED || reason == KillReason.BLOB_CHANGED) {
-                staticState.iSupervisor.killedWorker(staticState.port);
+                staticState.supervisor.killedWorker(staticState.port);
             }
             dynamicState.container.kill();
         }
@@ -332,7 +330,8 @@
     /**
      * Drop all of the changingBlobs and pendingChangingBlobs.
      * 
-     * PRECONDITION: container is null
+     * <p>PRECONDITION: container is null
+     *
      * @param dynamicState current state.
      * @return the next state.
      */
@@ -356,8 +355,9 @@
      * Informs the async localizer that the worker acknowledged the change for all of the blobs.
      * The worker has stopped as of now.
      *
-     * PRECONDITION: container is null
+     * <p>PRECONDITION: container is null
      * PRECONDITION: changingBlobs should only be for the given assignment.
+     *
      * @param dynamicState the current state
      * @return the futures for the current assignment.
      */
@@ -486,7 +486,7 @@
     /**
      * State Transitions for WAITING_FOR_BLOB_UPDATE state.
      *
-     * PRECONDITION: container is null
+     * <p>PRECONDITION: container is null
      * PRECONDITION: pendingChangingBlobs is not empty (otherwise why did we go to this state)
      * PRECONDITION: pendingChangingBlobsAssignment is not null.
      *
@@ -560,9 +560,11 @@
 
         if (dynamicState.container.areAllProcessesDead()) {
             LOG.info("SLOT {} all processes are dead...", staticState.port);
-            return cleanupCurrentContainer(dynamicState, staticState,
-                                           dynamicState.pendingLocalization ==
-                                           null ? MachineState.EMPTY : MachineState.WAITING_FOR_BLOB_LOCALIZATION);
+            return cleanupCurrentContainer(dynamicState,
+                    staticState,
+                    dynamicState.pendingLocalization == null
+                            ? MachineState.EMPTY
+                            : MachineState.WAITING_FOR_BLOB_LOCALIZATION);
         }
 
         LOG.warn("SLOT {} force kill and wait...", staticState.port);
@@ -836,7 +838,9 @@
      * @param newAssignment the new assignment for this slot to run, null to run nothing
      */
     public final void setNewAssignment(LocalAssignment newAssignment) {
-        this.newAssignment.set(newAssignment == null ? null : new TimerDecoratedAssignment(newAssignment, staticState.slotMetrics.workerLaunchDuration));
+        this.newAssignment.set(newAssignment == null
+                ? null
+                : new TimerDecoratedAssignment(newAssignment, staticState.slotMetrics.workerLaunchDuration));
     }
 
     @Override
@@ -908,7 +912,6 @@
         try {
             while (!done) {
                 Set<TopoProfileAction> origProfileActions = new HashSet<>(profiling.get());
-                Set<TopoProfileAction> removed = new HashSet<>(origProfileActions);
 
                 Set<BlobChanging> changingResourcesToHandle = dynamicState.changingBlobs;
                 if (!changingBlobs.isEmpty()) {
@@ -919,8 +922,8 @@
                     //Remove/Clean up changed requests that are not for us
                     while (it.hasNext()) {
                         BlobChanging rc = it.next();
-                        if (!forSameTopology(rc.assignment, dynamicState.currentAssignment) &&
-                            !forSameTopology(rc.assignment, dynamicState.newAssignment)) {
+                        if (!forSameTopology(rc.assignment, dynamicState.currentAssignment)
+                                && !forSameTopology(rc.assignment, dynamicState.newAssignment)) {
                             rc.latch.countDown(); //Ignore the future
                             it.remove();
                         }
@@ -936,16 +939,20 @@
                     LOG.info("STATE {} -> {}", dynamicState, nextState);
                 }
                 //Save the current state for recovery
-                if ((nextState.currentAssignment != null && !nextState.currentAssignment.equals(dynamicState.currentAssignment)) ||
-                    (dynamicState.currentAssignment != null && !dynamicState.currentAssignment.equals(nextState.currentAssignment))) {
+                if ((nextState.currentAssignment != null
+                                && !nextState.currentAssignment.equals(dynamicState.currentAssignment))
+                        || (dynamicState.currentAssignment != null
+                                && !dynamicState.currentAssignment.equals(nextState.currentAssignment))) {
                     LOG.info("SLOT {}: Changing current assignment from {} to {}", staticState.port, dynamicState.currentAssignment,
                              nextState.currentAssignment);
                     saveNewAssignment(nextState.currentAssignment);
                 }
 
                 if (equivalent(nextState.newAssignment, nextState.currentAssignment)
-                    && nextState.currentAssignment != null && nextState.currentAssignment.get_owner() == null
-                    && nextState.newAssignment != null && nextState.newAssignment.get_owner() != null) {
+                        && nextState.currentAssignment != null
+                        && nextState.currentAssignment.get_owner() == null
+                        && nextState.newAssignment != null
+                        && nextState.newAssignment.get_owner() != null) {
                     //This is an odd case for a rolling upgrade where the user on the old assignment may be null,
                    // but not on the new one, although in all other ways they are the same.
                     // If this happens we want to use the assignment with the owner.
@@ -955,6 +962,7 @@
                 }
 
                 // clean up the profiler actions that are not being processed
+                Set<TopoProfileAction> removed = new HashSet<>(origProfileActions);
                 removed.removeAll(dynamicState.profileActions);
                 removed.removeAll(dynamicState.pendingStopProfileActions);
                 for (TopoProfileAction action : removed) {
@@ -964,7 +972,8 @@
                         LOG.error("Error trying to remove profiling request, it will be retried", e);
                     }
                 }
-                Set<TopoProfileAction> orig, copy;
+                Set<TopoProfileAction> orig;
+                Set<TopoProfileAction> copy;
                 do {
                     orig = profiling.get();
                     copy = new HashSet<>(orig);
@@ -1036,7 +1045,7 @@
         public final ContainerLauncher containerLauncher;
         public final int port;
         public final String host;
-        public final ISupervisor iSupervisor;
+        public final ISupervisor supervisor;
         public final LocalState localState;
         public final BlobChangingCallback changingCallback;
         public final OnlyLatestExecutor<Integer> metricsExec;
@@ -1046,7 +1055,7 @@
         StaticState(AsyncLocalizer localizer, long hbTimeoutMs, long firstHbTimeoutMs,
                     long killSleepMs, long monitorFreqMs,
                     ContainerLauncher containerLauncher, String host, int port,
-                    ISupervisor iSupervisor, LocalState localState,
+                    ISupervisor supervisor, LocalState localState,
                     BlobChangingCallback changingCallback,
                     OnlyLatestExecutor<Integer> metricsExec,
                     WorkerMetricsProcessor metricsProcessor,
@@ -1059,7 +1068,7 @@
             this.monitorFreqMs = monitorFreqMs;
             this.host = host;
             this.port = port;
-            this.iSupervisor = iSupervisor;
+            this.supervisor = supervisor;
             this.localState = localState;
             this.changingCallback = changingCallback;
             this.metricsExec = metricsExec;
@@ -1117,7 +1126,7 @@
         public final long startTime;
         private final SlotMetrics slotMetrics;
 
-        public DynamicState(final LocalAssignment currentAssignment, Container container, final LocalAssignment newAssignment,
+        DynamicState(final LocalAssignment currentAssignment, Container container, final LocalAssignment newAssignment,
             SlotMetrics slotMetrics) {
             this.currentAssignment = currentAssignment;
             this.container = container;
@@ -1144,7 +1153,7 @@
             this.slotMetrics = slotMetrics;
         }
 
-        public DynamicState(final MachineState state, final LocalAssignment newAssignment,
+        DynamicState(final MachineState state, final LocalAssignment newAssignment,
                             final Container container, final LocalAssignment currentAssignment,
                             final LocalAssignment pendingLocalization, final long startTime,
                             final Future<Void> pendingDownload, final Set<TopoProfileAction> profileActions,
@@ -1286,7 +1295,7 @@
         public final String topoId;
         public final ProfileRequest request;
 
-        public TopoProfileAction(String topoId, ProfileRequest request) {
+        TopoProfileAction(String topoId, ProfileRequest request) {
             this.topoId = topoId;
             this.request = request;
         }
@@ -1319,7 +1328,7 @@
         private final LocallyCachedBlob blob;
         private final GoodToGo.GoodToGoLatch latch;
 
-        public BlobChanging(LocalAssignment assignment, LocallyCachedBlob blob, GoodToGo.GoodToGoLatch latch) {
+        BlobChanging(LocalAssignment assignment, LocallyCachedBlob blob, GoodToGo.GoodToGoLatch latch) {
             this.assignment = assignment;
             this.blob = blob;
             this.latch = latch;
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Supervisor.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Supervisor.java
index 9bc4f7f..259a13e 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Supervisor.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/Supervisor.java
@@ -84,6 +84,7 @@
     private final Map<String, Object> conf;
     private final IContext sharedContext;
     private final IAuthorizer authorizationHandler;
+    @SuppressWarnings("checkstyle:MemberName")
     private final ISupervisor iSupervisor;
     private final Utils.UptimeComputer upTime;
     private final String stormVersion;
@@ -115,6 +116,7 @@
     //Passed to workers in local clusters, exposed by thrift server in distributed mode
     private org.apache.storm.generated.Supervisor.Iface supervisorThriftInterface;
 
+    @SuppressWarnings("checkstyle:ParameterName")
     private Supervisor(ISupervisor iSupervisor, StormMetricsRegistry metricsRegistry)
         throws IOException, IllegalAccessException, InstantiationException, ClassNotFoundException {
         this(ConfigUtils.readStormConfig(), null, iSupervisor, metricsRegistry);
@@ -126,8 +128,8 @@
      * @param conf          config
      * @param sharedContext {@link IContext}
      * @param iSupervisor   {@link ISupervisor}
-     * @throws IOException
      */
+    @SuppressWarnings("checkstyle:ParameterName")
     public Supervisor(Map<String, Object> conf, IContext sharedContext, ISupervisor iSupervisor, StormMetricsRegistry metricsRegistry)
         throws IOException, IllegalAccessException, ClassNotFoundException, InstantiationException {
         this.conf = conf;
@@ -186,8 +188,6 @@
 
     /**
      * Supervisor daemon entry point.
-     *
-     * @param args
      */
     public static void main(String[] args) throws Exception {
         Utils.setupDefaultUncaughtExceptionHandler();
@@ -367,7 +367,6 @@
     @VisibleForTesting
     public void checkAuthorization(String topoName, Map<String, Object> topoConf, String operation, ReqContext context)
         throws AuthorizationException {
-        IAuthorizer aclHandler = authorizationHandler;
         if (context == null) {
             context = ReqContext.context();
         }
@@ -384,6 +383,7 @@
             throw new WrappedAuthorizationException("Supervisor does not support impersonation");
         }
 
+        IAuthorizer aclHandler = authorizationHandler;
         if (aclHandler != null) {
             if (!aclHandler.permit(context, operation, checkConf)) {
                 ThriftAccessLogger.logAccess(context.requestID(), context.remoteAddress(), context.principal(),
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/SupervisorUtils.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/SupervisorUtils.java
index 4619aeb..a0d0397 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/SupervisorUtils.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/SupervisorUtils.java
@@ -64,10 +64,7 @@
 
     /**
      * Given the blob information, returns the value of the uncompress field, handling it being either a string or a boolean value, or if
-     * it's not specified then returns false
-     *
-     * @param blobInfo
-     * @return
+     * it's not specified then returns false.
      */
     public static boolean shouldUncompressBlob(Map<String, Object> blobInfo) {
         return ObjectReader.getBoolean(blobInfo.get("uncompress"), false);
@@ -85,10 +82,7 @@
     }
 
     /**
-     * Returns a list of LocalResources based on the blobstore-map passed in
-     *
-     * @param blobstoreMap
-     * @return
+     * Returns a list of LocalResources based on the blobstore-map passed in.
      */
     public static List<LocalResource> blobstoreMapToLocalresources(Map<String, Map<String, Object>> blobstoreMap) {
         List<LocalResource> localResourceList = new ArrayList<>();
@@ -109,10 +103,7 @@
     }
 
     /**
-     * map from worker id to heartbeat
-     *
-     * @param conf
-     * @return
+     * Map from worker id to heartbeat.
      */
     public static Map<String, LSWorkerHeartbeat> readWorkerHeartbeats(Map<String, Object> conf) {
@@ -120,11 +111,7 @@
     }
 
     /**
-     * get worker heartbeat by workerId.
-     *
-     * @param conf
-     * @param workerId
-     * @return
+     * Get worker heartbeat by workerId.
      */
     private static LSWorkerHeartbeat readWorkerHeartbeat(Map<String, Object> conf, String workerId) {
         return _instance.readWorkerHeartbeatImpl(conf, workerId);
diff --git a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/UniFunc.java b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/UniFunc.java
index e531cb6..b0dfd94 100644
--- a/storm-server/src/main/java/org/apache/storm/daemon/supervisor/UniFunc.java
+++ b/storm-server/src/main/java/org/apache/storm/daemon/supervisor/UniFunc.java
@@ -13,5 +13,5 @@
 package org.apache.storm.daemon.supervisor;
 
 public interface UniFunc<T> {
-    public void call(T arg) throws Exception;
+    void call(T arg) throws Exception;
 }
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/AsyncLocalizer.java b/storm-server/src/main/java/org/apache/storm/localizer/AsyncLocalizer.java
index fc80b6b..eaa6384 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/AsyncLocalizer.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/AsyncLocalizer.java
@@ -139,43 +139,43 @@
     @VisibleForTesting
     LocallyCachedBlob getTopoJar(final String topologyId, String owner) {
         return topologyBlobs.computeIfAbsent(ConfigUtils.masterStormJarKey(topologyId),
-                                             (tjk) -> {
-                                                 try {
-                                                     return new LocallyCachedTopologyBlob(topologyId, isLocalMode, conf, fsOps,
-                                                                                          LocallyCachedTopologyBlob.TopologyBlobType
-                                                                                              .TOPO_JAR, owner, metricsRegistry);
-                                                 } catch (IOException e) {
-                                                     throw new RuntimeException(e);
-                                                 }
-                                             });
+            (tjk) -> {
+                try {
+                    return new LocallyCachedTopologyBlob(topologyId, isLocalMode, conf, fsOps,
+                                                      LocallyCachedTopologyBlob.TopologyBlobType
+                                                          .TOPO_JAR, owner, metricsRegistry);
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            });
     }
 
     @VisibleForTesting
     LocallyCachedBlob getTopoCode(final String topologyId, String owner) {
         return topologyBlobs.computeIfAbsent(ConfigUtils.masterStormCodeKey(topologyId),
-                                             (tck) -> {
-                                                 try {
-                                                     return new LocallyCachedTopologyBlob(topologyId, isLocalMode, conf, fsOps,
-                                                                                          LocallyCachedTopologyBlob.TopologyBlobType
-                                                                                              .TOPO_CODE, owner, metricsRegistry);
-                                                 } catch (IOException e) {
-                                                     throw new RuntimeException(e);
-                                                 }
-                                             });
+            (tck) -> {
+                try {
+                    return new LocallyCachedTopologyBlob(topologyId, isLocalMode, conf, fsOps,
+                                                      LocallyCachedTopologyBlob.TopologyBlobType
+                                                          .TOPO_CODE, owner, metricsRegistry);
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            });
     }
 
     @VisibleForTesting
     LocallyCachedBlob getTopoConf(final String topologyId, String owner) {
         return topologyBlobs.computeIfAbsent(ConfigUtils.masterStormConfKey(topologyId),
-                                             (tck) -> {
-                                                 try {
-                                                     return new LocallyCachedTopologyBlob(topologyId, isLocalMode, conf, fsOps,
-                                                                                          LocallyCachedTopologyBlob.TopologyBlobType
-                                                                                              .TOPO_CONF, owner, metricsRegistry);
-                                                 } catch (IOException e) {
-                                                     throw new RuntimeException(e);
-                                                 }
-                                             });
+            (tck) -> {
+                try {
+                    return new LocallyCachedTopologyBlob(topologyId, isLocalMode, conf, fsOps,
+                                                      LocallyCachedTopologyBlob.TopologyBlobType
+                                                          .TOPO_CONF, owner, metricsRegistry);
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            });
     }
 
     private LocalizedResource getUserArchive(String user, String key) {
@@ -209,22 +209,22 @@
 
         CompletableFuture<Void> baseBlobs = requestDownloadBaseTopologyBlobs(pna, cb);
         return baseBlobs.thenComposeAsync((v) ->
-                                              blobPending.compute(topologyId, (tid, old) -> {
-                                                  CompletableFuture<Void> ret = old;
-                                                  if (ret == null) {
-                                                      ret = CompletableFuture.supplyAsync(new DownloadBlobs(pna, cb), execService);
-                                                  } else {
-                                                      try {
-                                                          addReferencesToBlobs(pna, cb);
-                                                      } catch (Exception e) {
-                                                          throw new RuntimeException(e);
-                                                      } finally {
-                                                          pna.complete();
-                                                      }
-                                                  }
-                                                  LOG.debug("Reserved blobs {} {}", topologyId, ret);
-                                                  return ret;
-                                              }));
+            blobPending.compute(topologyId, (tid, old) -> {
+                CompletableFuture<Void> ret = old;
+                if (ret == null) {
+                    ret = CompletableFuture.supplyAsync(new DownloadBlobs(pna, cb), execService);
+                } else {
+                    try {
+                        addReferencesToBlobs(pna, cb);
+                    } catch (Exception e) {
+                        throw new RuntimeException(e);
+                    } finally {
+                        pna.complete();
+                    }
+                }
+                LOG.debug("Reserved blobs {} {}", topologyId, ret);
+                return ret;
+            }));
     }
 
     @VisibleForTesting
@@ -241,7 +241,7 @@
         topoConf.addReference(pna, cb);
 
         return topologyBasicDownloaded.computeIfAbsent(topologyId,
-                                                       (tid) -> downloadOrUpdate(topoJar, topoCode, topoConf));
+            (tid) -> downloadOrUpdate(topoJar, topoCode, topoConf));
     }
 
     private CompletableFuture<Void> downloadOrUpdate(LocallyCachedBlob... blobs) {
@@ -656,7 +656,7 @@
         private final PortAndAssignment pna;
         private final BlobChangingCallback cb;
 
-        public DownloadBlobs(PortAndAssignment pna, BlobChangingCallback cb) {
+        DownloadBlobs(PortAndAssignment pna, BlobChangingCallback cb) {
             this.pna = pna;
             this.cb = cb;
         }
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/BlobChangingCallback.java b/storm-server/src/main/java/org/apache/storm/localizer/BlobChangingCallback.java
index ebbbdc5..2ad96ce 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/BlobChangingCallback.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/BlobChangingCallback.java
@@ -29,7 +29,7 @@
      * Informs the listener that a blob has changed and is ready to update and replace a localized blob that has been marked as tied to the
      * life cycle of the worker process.
      *
-     * If `go.getLatch()` is never called before the method completes it is assumed that the listener is good with the blob changing.
+     * <p>If `go.getLatch()` is never called before the method completes, it is assumed that the listener is good with the blob changing.
      *
      * @param assignment the assignment this resource and callback are registered with.
      * @param port       the port that this resource and callback are registered with.
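
A self-contained model of the latch contract documented above (stand-in types, not the real Storm API): the caller can tell whether the listener asked for the latch before returning, and only waits for it if so.

```java
import java.util.concurrent.CountDownLatch;

class GoodToGoSketch {
    private final CountDownLatch latch = new CountDownLatch(1);
    private boolean gotLatch = false;

    synchronized CountDownLatch getLatch() {
        gotLatch = true;  // the listener opted in to delaying the change
        return latch;
    }

    synchronized boolean listenerWantsToWait() {
        // if getLatch() was never called, the change may proceed immediately
        return gotLatch;
    }
}
```
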
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/GoodToGo.java b/storm-server/src/main/java/org/apache/storm/localizer/GoodToGo.java
index 9217388..5050e86 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/GoodToGo.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/GoodToGo.java
@@ -29,6 +29,7 @@
 public class GoodToGo {
     private final GoodToGoLatch latch;
     private boolean gotLatch = false;
+
     public GoodToGo(CountDownLatch latch, Future<Void> doneChanging) {
         this.latch = new GoodToGoLatch(latch, doneChanging);
     }
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/IOFunction.java b/storm-server/src/main/java/org/apache/storm/localizer/IOFunction.java
index e8d8171..0a732b7 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/IOFunction.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/IOFunction.java
@@ -17,6 +17,8 @@
 import java.io.IOException;
 
 @FunctionalInterface
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public interface IOFunction<T, R> {
+
     R apply(T t) throws IOException;
 }
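
Because `apply` declares `IOException`, I/O method references can be passed where an `IOFunction` is expected without wrapping checked exceptions. A small usage sketch (the `applyTo` helper and file name are illustrative):

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

class IOFunctionDemo {
    static <T, R> R applyTo(T input, IOFunction<T, R> fn) throws IOException {
        return fn.apply(input);
    }

    public static void main(String[] args) throws IOException {
        Path p = Paths.get("storm.yaml");
        long size = applyTo(p, Files::size);  // Files::size throws IOException, which IOFunction permits
        System.out.println(size);
    }
}
```
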
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResource.java b/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResource.java
index ea3bdac..87bd970 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResource.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResource.java
@@ -220,7 +220,7 @@
     private void setSize() {
         // we trust that the file exists
         Path withVersion = getFilePathWithVersion();
-        size = ServerUtils.getDU(withVersion.toFile());
+        size = ServerUtils.getDiskUsage(withVersion.toFile());
         LOG.debug("size of {} is: {}", withVersion, size);
     }
 
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResourceRetentionSet.java b/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResourceRetentionSet.java
index 25d6f4d..3c31d4e 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResourceRetentionSet.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/LocalizedResourceRetentionSet.java
@@ -81,10 +81,6 @@
     public void cleanup(ClientBlobStore store) {
         LOG.debug("cleanup target size: {} current size is: {}", targetSize, currentSize);
         long bytesOver = currentSize - targetSize;
-        if (bytesOver <= 0) { // no need to query remote files
-            return;
-        }
-
         //First delete everything that no longer exists...
         for (Iterator<Map.Entry<LocallyCachedBlob, Map<String, ? extends LocallyCachedBlob>>> i = noReferences.entrySet().iterator();
              i.hasNext(); ) {
@@ -138,6 +134,7 @@
         return "Cache: " + currentSize;
     }
 
+    @SuppressWarnings("checkstyle:AbbreviationAsWordInName")
     static class LRUComparator implements Comparator<LocallyCachedBlob> {
         @Override
         public int compare(LocallyCachedBlob r1, LocallyCachedBlob r2) {
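
The change above drops the early size check so blobs that no longer exist remotely are always cleaned up, even when the cache is under its target size; only after that does least-recently-used eviction apply. A hedged sketch of the LRU half (field and method names are illustrative):

```java
import java.util.Comparator;
import java.util.List;

class RetentionSketch {
    static class Blob {
        long lastUsedTime;
        long sizeOnDisk;

        void delete() {
            // remove the blob's local files
        }
    }

    static void evictUntilUnderTarget(List<Blob> noReferences, long currentSize, long targetSize) {
        noReferences.sort(Comparator.comparingLong(b -> b.lastUsedTime));  // least recently used first
        long bytesOver = currentSize - targetSize;
        for (Blob b : noReferences) {
            if (bytesOver <= 0) {
                break;
            }
            bytesOver -= b.sizeOnDisk;
            b.delete();
        }
    }
}
```
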
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/LocallyCachedBlob.java b/storm-server/src/main/java/org/apache/storm/localizer/LocallyCachedBlob.java
index c948240..a198c86 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/LocallyCachedBlob.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/LocallyCachedBlob.java
@@ -299,7 +299,7 @@
         private final Path downloadPath;
         private final long version;
 
-        public DownloadMeta(Path downloadPath, long version) {
+        DownloadMeta(Path downloadPath, long version) {
             this.downloadPath = downloadPath;
             this.version = version;
         }
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/LocallyCachedTopologyBlob.java b/storm-server/src/main/java/org/apache/storm/localizer/LocallyCachedTopologyBlob.java
index 3b019f4..babed56 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/LocallyCachedTopologyBlob.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/LocallyCachedTopologyBlob.java
@@ -139,9 +139,8 @@
         if (isLocalMode && type == TopologyBlobType.TOPO_JAR) {
             LOG.debug("DOWNLOADING LOCAL JAR to TEMP LOCATION... {}", topologyId);
             //This is a special case where the jar was not uploaded so we will not download it (it is already on the classpath)
-            ClassLoader classloader = Thread.currentThread().getContextClassLoader();
             String resourcesJar = resourcesJar();
-            URL url = classloader.getResource(ServerConfigUtils.RESOURCES_SUBDIR);
+            URL url = ServerUtils.getResourceFromClassloader(ServerConfigUtils.RESOURCES_SUBDIR);
             Path extractionDest = topologyBasicBlobsRootDir.resolve(type.getTempExtractionDir(LOCAL_MODE_JAR_VERSION));
             if (resourcesJar != null) {
                 LOG.info("Extracting resources from jar at {} to {}", resourcesJar, extractionDest);
@@ -154,6 +153,10 @@
                 } else {
                     fsOps.copyDirectory(new File(url.getFile()), extractionDest.toFile());
                 }
+            } else if (!fsOps.fileExists(extractionDest)) {
+                // if we can't find the resources directory in a resources jar or in the classpath, just create an empty
+                // resources directory. This way we can check later that the topology jar was fully downloaded.
+                fsOps.forceMkdir(extractionDest);
             }
             return LOCAL_MODE_JAR_VERSION;
         }
@@ -225,7 +228,7 @@
         }
         if (!(isLocalMode && type == TopologyBlobType.TOPO_JAR)) {
             //Don't try to move the JAR file in local mode, it does not exist because it was not uploaded
-            Files.move(tempLoc, dest);
+            fsOps.moveFile(tempLoc.toFile(), dest.toFile());
         }
         synchronized (LocallyCachedTopologyBlob.class) {
             //This is a bit ugly, but it works.  In order to maintain the same directory structure that existed before
@@ -258,11 +261,12 @@
     private void cleanUpTemp(String baseName) throws IOException {
         LOG.debug("Cleaning up temporary data in {}", topologyBasicBlobsRootDir);
         try (DirectoryStream<Path> children = fsOps.newDirectoryStream(topologyBasicBlobsRootDir,
-                                                                       (p) -> {
-                                                                           String fileName = p.getFileName().toString();
-                                                                           Matcher m = EXTRACT_BASE_NAME_AND_VERSION.matcher(fileName);
-                                                                           return m.matches() && baseName.equals(m.group(1));
-                                                                       })) {
+            (p) -> {
+                String fileName = p.getFileName().toString();
+                Matcher m = EXTRACT_BASE_NAME_AND_VERSION.matcher(fileName);
+                return m.matches() && baseName.equals(m.group(1));
+            })
+        ) {
            //children is only ever null if topologyBasicBlobsRootDir does not exist. This happens during unit tests,
            // and because a non-existent directory is by definition clean we are ignoring it.
             if (children != null) {
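
The `cleanUpTemp` hunk above filters a `DirectoryStream` with a regex over file names. A self-contained sketch of that scan-and-delete pattern (the pattern here is illustrative, not the real `EXTRACT_BASE_NAME_AND_VERSION`):

```java
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class TempCleanupSketch {
    private static final Pattern BASE_AND_VERSION = Pattern.compile("(.+)\\.(\\d+)\\.tmp");

    static void cleanUpTemp(Path root, String baseName) throws IOException {
        try (DirectoryStream<Path> children = Files.newDirectoryStream(root, (p) -> {
            Matcher m = BASE_AND_VERSION.matcher(p.getFileName().toString());
            return m.matches() && baseName.equals(m.group(1));
        })) {
            for (Path stale : children) {
                Files.deleteIfExists(stale);  // remove leftover partial downloads
            }
        }
    }
}
```
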
diff --git a/storm-server/src/main/java/org/apache/storm/localizer/PortAndAssignmentImpl.java b/storm-server/src/main/java/org/apache/storm/localizer/PortAndAssignmentImpl.java
index af9bb8b..509ef14 100644
--- a/storm-server/src/main/java/org/apache/storm/localizer/PortAndAssignmentImpl.java
+++ b/storm-server/src/main/java/org/apache/storm/localizer/PortAndAssignmentImpl.java
@@ -21,7 +21,7 @@
     private final int port;
     private final LocalAssignment assignment;
 
-    public PortAndAssignmentImpl(int port, LocalAssignment assignment) {
+    PortAndAssignmentImpl(int port, LocalAssignment assignment) {
         this.port = port;
         this.assignment = assignment;
     }
diff --git a/storm-server/src/main/java/org/apache/storm/metric/ClusterMetricsConsumerExecutor.java b/storm-server/src/main/java/org/apache/storm/metric/ClusterMetricsConsumerExecutor.java
index cfeb63c..6f2cd30 100644
--- a/storm-server/src/main/java/org/apache/storm/metric/ClusterMetricsConsumerExecutor.java
+++ b/storm-server/src/main/java/org/apache/storm/metric/ClusterMetricsConsumerExecutor.java
@@ -21,9 +21,9 @@
 public class ClusterMetricsConsumerExecutor {
     public static final Logger LOG = LoggerFactory.getLogger(ClusterMetricsConsumerExecutor.class);
     private static final String ERROR_MESSAGE_PREPARATION_CLUSTER_METRICS_CONSUMER_FAILED =
-        "Preparation of Cluster Metrics Consumer failed. " +
-        "Please check your configuration and/or corresponding systems and relaunch Nimbus. " +
-        "Skipping handle metrics.";
+            "Preparation of Cluster Metrics Consumer failed. "
+                    + "Please check your configuration and/or corresponding systems and relaunch Nimbus. "
+                    + "Skipping handle metrics.";
 
     private IClusterMetricsConsumer metricsConsumer;
     private String consumerClassName;
@@ -39,8 +39,9 @@
             metricsConsumer = (IClusterMetricsConsumer) Class.forName(consumerClassName).newInstance();
             metricsConsumer.prepare(registrationArgument);
         } catch (Exception e) {
-            LOG.error("Could not instantiate or prepare Cluster Metrics Consumer with fully qualified name " +
-                      consumerClassName, e);
+            LOG.error("Could not instantiate or prepare Cluster Metrics Consumer with fully qualified name "
+                    + consumerClassName,
+                    e);
 
             if (metricsConsumer != null) {
                 metricsConsumer.cleanup();
diff --git a/storm-server/src/main/java/org/apache/storm/metric/LoggingClusterMetricsConsumer.java b/storm-server/src/main/java/org/apache/storm/metric/LoggingClusterMetricsConsumer.java
index f07dc54..0e296fb 100644
--- a/storm-server/src/main/java/org/apache/storm/metric/LoggingClusterMetricsConsumer.java
+++ b/storm-server/src/main/java/org/apache/storm/metric/LoggingClusterMetricsConsumer.java
@@ -19,20 +19,18 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * Listens for cluster related metrics, dumps them to log
+ * Listens for cluster related metrics, dumps them to log.
  *
- * To use, edit the storm.yaml config file:
- *
+ * <p>To use, edit the storm.yaml config file:
  * ```yaml
  *   storm.cluster.metrics.register:
  *     - class: "org.apache.storm.metrics.LoggingClusterMetricsConsumer"
  * ```
- *
  */
 public class LoggingClusterMetricsConsumer implements IClusterMetricsConsumer {
     public static final Logger LOG = LoggerFactory.getLogger(LoggingClusterMetricsConsumer.class);
 
-    static private String padding = "                       ";
+    private static String padding = "                       ";
 
     @Override
     public void prepare(Object registrationArgument) {
@@ -58,8 +56,8 @@
         for (DataPoint p : dataPoints) {
             sb.delete(header.length(), sb.length());
             sb.append(p.getName())
-              .append(padding).delete(header.length() + 23, sb.length()).append("\t")
-              .append(p.getValue());
+                    .append(padding).delete(header.length() + 23, sb.length()).append("\t")
+                    .append(p.getValue());
             LOG.info(sb.toString());
         }
     }
@@ -72,8 +70,10 @@
         for (DataPoint p : dataPoints) {
             sb.delete(header.length(), sb.length());
             sb.append(p.getName())
-              .append(padding).delete(header.length() + 23, sb.length()).append("\t")
-              .append(p.getValue());
+                    .append(padding)
+                    .delete(header.length() + 23, sb.length())
+                    .append("\t")
+                    .append(p.getValue());
             LOG.info(sb.toString());
         }
     }
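
The `StringBuilder` chain above right-pads each metric name into a fixed-width column by appending a padding string and then truncating. For names that fit in the column, a format width produces the same layout more directly:

```java
class PaddingDemo {
    public static void main(String[] args) {
        // "%-23s" left-justifies the name in a 23-character field
        System.out.println(String.format("%-23s\t%s", "metricName", 42));
    }
}
```
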
diff --git a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbMetricsWriter.java b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbMetricsWriter.java
index fae3865..2c01423 100644
--- a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbMetricsWriter.java
+++ b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbMetricsWriter.java
@@ -184,8 +184,7 @@
         // attempt to find the string in the database
         try {
             stringMetadata = store.rocksDbGetStringMetadata(type, s);
-        }
-        catch (RocksDBException e) {
+        } catch (RocksDBException e) {
             throw new MetricException("Error reading metrics data", e);
         }
         if (stringMetadata != null) {
@@ -244,8 +243,7 @@
                     unusedIds.remove(key.getMetadataStringId());
                     return true; // process all metadata
                 });
-            }
-            catch (RocksDBException e) {
+            } catch (RocksDBException e) {
                 throw new MetricException("Error reading metrics data", e);
             }
         }
diff --git a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbStore.java b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbStore.java
index ba3f08b..89e4cec 100644
--- a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbStore.java
+++ b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/RocksDbStore.java
@@ -276,8 +276,7 @@
         // attempt to find the string in the database
         try {
             stringMetadata = rocksDbGetStringMetadata(type, s);
-        }
-        catch (RocksDBException e) {
+        } catch (RocksDBException e) {
             throw new MetricException("Error reading metric data", e);
         }
 
@@ -525,18 +524,15 @@
 
                                 // callback to caller
                                 scanCallback.cb(metric);
-                            }
-                            catch (MetricException e) {
+                            } catch (MetricException e) {
                                 LOG.warn("Failed to report found metric: {}", e.getMessage());
                             }
-                        }
-                        else {
+                        } else {
                             try {
                                 if (!rawCallback.cb(key, val)) {
                                     return;
                                 }
-                            }
-                            catch (RocksDBException e) {
+                            } catch (RocksDBException e) {
                                 throw new MetricException("Error reading metrics data", e);
                             }
                         }
@@ -627,8 +623,7 @@
                     }
                     return true;
                 });
-            }
-            catch (RocksDBException e) {
+            } catch (RocksDBException e) {
                 throw new MetricException("Error reading metric data", e);
             }
 
diff --git a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/StringMetadataCache.java b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/StringMetadataCache.java
index bf90c69..e8428c3 100644
--- a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/StringMetadataCache.java
+++ b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/StringMetadataCache.java
@@ -109,10 +109,10 @@
     /**
      * Add the string metadata to the cache.
      *
-     * NOTE: this can cause data to be evicted from the cache when full.  When this occurs, the evictionCallback() method
+     * <p>NOTE: this can cause data to be evicted from the cache when full.  When this occurs, the evictionCallback() method
      * is called to store the metadata back into the RocksDB database.
      *
-     * This method is only exposed to the WritableStringMetadataCache interface.
+     * <p>This method is only exposed to the WritableStringMetadataCache interface.
      *
      * @param s   The string to add
      * @param stringMetadata  The string's metadata
diff --git a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/WritableStringMetadataCache.java b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/WritableStringMetadataCache.java
index fc5f0b1..d31fe9f 100644
--- a/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/WritableStringMetadataCache.java
+++ b/storm-server/src/main/java/org/apache/storm/metricstore/rocksdb/WritableStringMetadataCache.java
@@ -24,10 +24,10 @@
     /**
      * Add the string metadata to the cache.
      *
-     * * NOTE: this can cause data to be evicted from the cache when full.  When this occurs, the evictionCallback() method
+     * <p>NOTE: this can cause data to be evicted from the cache when full.  When this occurs, the evictionCallback() method
      * is called to store the metadata back into the RocksDB database.
      *
-     * This method is only exposed to the WritableStringMetadataCache interface.
+     * <p>This method is only exposed to the WritableStringMetadataCache interface.
      *
      * @param s   The string to add
      * @param stringMetadata  The string's metadata
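
The write-back-on-eviction behavior documented above can be modeled with `LinkedHashMap`'s access-order LRU hook. A minimal sketch in which a caller-supplied callback stands in for the RocksDB write:

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.BiConsumer;

class EvictingCacheSketch<K, V> extends LinkedHashMap<K, V> {
    private final int capacity;
    private final BiConsumer<K, V> evictionCallback;

    EvictingCacheSketch(int capacity, BiConsumer<K, V> evictionCallback) {
        super(16, 0.75f, true);  // accessOrder=true gives LRU iteration order
        this.capacity = capacity;
        this.evictionCallback = evictionCallback;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        if (size() > capacity) {
            evictionCallback.accept(eldest.getKey(), eldest.getValue());  // write back before dropping
            return true;
        }
        return false;
    }
}
```
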
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/AssignmentDistributionService.java b/storm-server/src/main/java/org/apache/storm/nimbus/AssignmentDistributionService.java
index 05cb1df..4f84997 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/AssignmentDistributionService.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/AssignmentDistributionService.java
@@ -186,7 +186,6 @@
     * Get an assignment from the target queue with the specified index.
      * @param queueIndex index of the queue
      * @return an {@link NodeAssignments}
-     * @throws InterruptedException
      */
     public NodeAssignments nextAssignments(Integer queueIndex) throws InterruptedException {
         NodeAssignments target = null;
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/DefaultTopologyValidator.java b/storm-server/src/main/java/org/apache/storm/nimbus/DefaultTopologyValidator.java
index 7fef7bf..c6bb208 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/DefaultTopologyValidator.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/DefaultTopologyValidator.java
@@ -24,7 +24,7 @@
     private static final Logger LOG = LoggerFactory.getLogger(DefaultTopologyValidator.class);
 
     @Override
-    public void prepare(Map<String, Object> StormConf) {
+    public void prepare(Map<String, Object> stormConf) {
     }
 
     @Override
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyActionNotifierPlugin.java b/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyActionNotifierPlugin.java
index 8247a85..1b5a6fb 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyActionNotifierPlugin.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyActionNotifierPlugin.java
@@ -18,21 +18,19 @@
  * A plugin interface that gets invoked any time there is an action for a topology.
  */
 public interface ITopologyActionNotifierPlugin {
+
     /**
      * Called once during nimbus initialization.
-     * @param StormConf
      */
-    void prepare(Map<String, Object> StormConf);
+    void prepare(Map<String, Object> stormConf);
 
     /**
     * When a new action is executed for a topology, this method will be called.
-     * @param topologyName
-     * @param action
      */
     void notify(String topologyName, String action);
 
     /**
-     * called during shutdown.
+     * Called during shutdown.
      */
     void cleanup();
 }
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyValidator.java b/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyValidator.java
index 88d756d..ff17192 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyValidator.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/ITopologyValidator.java
@@ -18,7 +18,7 @@
 
 public interface ITopologyValidator {
 
-    void prepare(Map<String, Object> StormConf);
+    void prepare(Map<String, Object> stormConf);
 
     void validate(String topologyName, Map<String, Object> topologyConf, StormTopology topology)
         throws InvalidTopologyException;
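
A hypothetical implementation of the interface above, purely for illustration (the reserved prefix and message are invented for the example):

```java
import java.util.Map;

import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.generated.StormTopology;

public class ReservedNameValidator implements ITopologyValidator {
    @Override
    public void prepare(Map<String, Object> stormConf) {
        // no configuration needed for this example
    }

    @Override
    public void validate(String topologyName, Map<String, Object> topologyConf, StormTopology topology)
        throws InvalidTopologyException {
        if (topologyName.startsWith("system-")) {
            throw new InvalidTopologyException("topology names starting with 'system-' are reserved");
        }
    }
}
```
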
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/LeaderListenerCallback.java b/storm-server/src/main/java/org/apache/storm/nimbus/LeaderListenerCallback.java
index bab42ce..08c7e99 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/LeaderListenerCallback.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/LeaderListenerCallback.java
@@ -12,6 +12,8 @@
 
 package org.apache.storm.nimbus;
 
+import com.codahale.metrics.Meter;
+
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.List;
@@ -20,7 +22,6 @@
 import java.util.TreeSet;
 import javax.security.auth.Subject;
 
-import com.codahale.metrics.Meter;
 import org.apache.commons.io.IOUtils;
 import org.apache.storm.Config;
 import org.apache.storm.DaemonConfig;
@@ -90,7 +91,7 @@
         this.numLostLeader = metricsRegistry.registerMeter("nimbus:num-lost-leadership");
         //Since we only give up leadership if we're waiting for blobs to sync,
         //it makes sense to wait a full sync cycle before trying for leadership again.
-        this.requeueDelayMs = ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CODE_SYNC_FREQ_SECS))*1000;
+        this.requeueDelayMs = ObjectReader.getInt(conf.get(DaemonConfig.NIMBUS_CODE_SYNC_FREQ_SECS)) * 1000;
     }
 
     /**
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/NimbusHeartbeatsPressureTest.java b/storm-server/src/main/java/org/apache/storm/nimbus/NimbusHeartbeatsPressureTest.java
index 89e732b..e2ac486 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/NimbusHeartbeatsPressureTest.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/NimbusHeartbeatsPressureTest.java
@@ -176,7 +176,7 @@
         private int tryTimes;
         private NimbusClient client;
 
-        public HeartbeatSendTask(int taskId, int tryTimes) {
+        HeartbeatSendTask(int taskId, int tryTimes) {
             this.taskId = taskId;
             this.tryTimes = tryTimes;
             this.runtimesBook = new double[tryTimes];
diff --git a/storm-server/src/main/java/org/apache/storm/nimbus/TimeOutWorkerHeartbeatsRecoveryStrategy.java b/storm-server/src/main/java/org/apache/storm/nimbus/TimeOutWorkerHeartbeatsRecoveryStrategy.java
index ac01aae..a8ec26a 100644
--- a/storm-server/src/main/java/org/apache/storm/nimbus/TimeOutWorkerHeartbeatsRecoveryStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/nimbus/TimeOutWorkerHeartbeatsRecoveryStrategy.java
@@ -12,6 +12,8 @@
 
 package org.apache.storm.nimbus;
 
+import static java.util.stream.Collectors.toSet;
+
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
@@ -21,8 +23,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static java.util.stream.Collectors.toSet;
-
 /**
  * Wait for a node to report worker heartbeats until a configured timeout. For the cases below we have strategies:
  *
diff --git a/storm-server/src/main/java/org/apache/storm/pacemaker/PacemakerServer.java b/storm-server/src/main/java/org/apache/storm/pacemaker/PacemakerServer.java
index 1543c64..d8364dc 100644
--- a/storm-server/src/main/java/org/apache/storm/pacemaker/PacemakerServer.java
+++ b/storm-server/src/main/java/org/apache/storm/pacemaker/PacemakerServer.java
@@ -48,13 +48,13 @@
     private String secret;
     private final String topologyName;
     private volatile ChannelGroup allChannels = new DefaultChannelGroup("storm-server", GlobalEventExecutor.INSTANCE);
-    private final ChannelGroup authenticated_channels = new DefaultChannelGroup("authenticated-pacemaker-channels", GlobalEventExecutor.INSTANCE);
+    private final ChannelGroup authenticatedChannels = new DefaultChannelGroup("authenticated-pacemaker-channels",
+            GlobalEventExecutor.INSTANCE);
     private final ThriftNettyServerCodec.AuthMethod authMethod;
     private final EventLoopGroup bossEventLoopGroup;
     private final EventLoopGroup workerEventLoopGroup;
 
-    public PacemakerServer(IServerMessageHandler handler, Map<String, Object> config) {
-        int maxWorkers = (int) config.get(DaemonConfig.PACEMAKER_MAX_THREADS);
+    PacemakerServer(IServerMessageHandler handler, Map<String, Object> config) {
         int port = (int) config.get(Config.PACEMAKER_PORT);
         this.handler = handler;
         this.topologyName = "pacemaker_server";
@@ -63,9 +63,9 @@
         switch (auth) {
 
             case "DIGEST":
-                Configuration login_conf = ClientAuthUtils.getConfiguration(config);
+                Configuration loginConf = ClientAuthUtils.getConfiguration(config);
                 authMethod = ThriftNettyServerCodec.AuthMethod.DIGEST;
-                this.secret = ClientAuthUtils.makeDigestPayload(login_conf, ClientAuthUtils.LOGIN_CONTEXT_PACEMAKER_DIGEST);
+                this.secret = ClientAuthUtils.makeDigestPayload(loginConf, ClientAuthUtils.LOGIN_CONTEXT_PACEMAKER_DIGEST);
                 if (this.secret == null) {
                     LOG.error("Can't start pacemaker server without digest secret.");
                     throw new RuntimeException("Can't start pacemaker server without digest secret.");
@@ -89,6 +89,7 @@
         ThreadFactory workerFactory = new NettyRenameThreadFactory("server-worker");
         this.bossEventLoopGroup = new NioEventLoopGroup(1, bossFactory);
         // 0 means DEFAULT_EVENT_LOOP_THREADS
+        int maxWorkers = (int) config.get(DaemonConfig.PACEMAKER_MAX_THREADS);
         // https://github.com/netty/netty/blob/netty-4.1.24.Final/transport/src/main/java/io/netty/channel/MultithreadEventLoopGroup.java#L40
         this.workerEventLoopGroup = new NioEventLoopGroup(maxWorkers > 0 ? maxWorkers : 0, workerFactory);
 
@@ -121,7 +122,7 @@
     }
 
     public void cleanPipeline(Channel channel) {
-        boolean authenticated = authenticated_channels.contains(channel);
+        boolean authenticated = authenticatedChannels.contains(channel);
         if (!authenticated) {
             if (channel.pipeline().get(ThriftNettyServerCodec.SASL_HANDLER) != null) {
                 channel.pipeline().remove(ThriftNettyServerCodec.SASL_HANDLER);
@@ -135,7 +136,7 @@
     public void received(Object mesg, String remote, Channel channel) throws InterruptedException {
         cleanPipeline(channel);
 
-        boolean authenticated = (authMethod == ThriftNettyServerCodec.AuthMethod.NONE) || authenticated_channels.contains(channel);
+        boolean authenticated = (authMethod == ThriftNettyServerCodec.AuthMethod.NONE) || authenticatedChannels.contains(channel);
         HBMessage m = (HBMessage) mesg;
         LOG.debug("received message. Passing to handler. {} : {} : {}",
                   handler.toString(), m.toString(), channel.toString());
@@ -161,6 +162,6 @@
     @Override
     public void authenticated(Channel c) {
         LOG.debug("Pacemaker server authenticated channel: {}", c.toString());
-        authenticated_channels.add(c);
+        authenticatedChannels.add(c);
     }
 }
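
The server above gates message handling on membership in a `ChannelGroup` of authenticated channels. A library-free sketch of the same gate, with a concurrent set standing in for the Netty `ChannelGroup`:

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

class AuthGateSketch<C> {
    private final Set<C> authenticatedChannels = ConcurrentHashMap.newKeySet();
    private final boolean authRequired;

    AuthGateSketch(boolean authRequired) {
        this.authRequired = authRequired;
    }

    void authenticated(C channel) {
        authenticatedChannels.add(channel);  // called once the SASL handshake succeeds
    }

    boolean mayHandle(C channel) {
        // reject messages from unauthenticated channels unless auth is disabled
        return !authRequired || authenticatedChannels.contains(channel);
    }
}
```
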
diff --git a/storm-server/src/main/java/org/apache/storm/pacemaker/codec/ThriftNettyServerCodec.java b/storm-server/src/main/java/org/apache/storm/pacemaker/codec/ThriftNettyServerCodec.java
index 45babf9..d15f369 100644
--- a/storm-server/src/main/java/org/apache/storm/pacemaker/codec/ThriftNettyServerCodec.java
+++ b/storm-server/src/main/java/org/apache/storm/pacemaker/codec/ThriftNettyServerCodec.java
@@ -53,32 +53,32 @@
     @Override
     protected void initChannel(Channel ch) throws Exception {
         ChannelPipeline pipeline = ch.pipeline();
-                pipeline.addLast("encoder", new ThriftEncoder());
+        pipeline.addLast("encoder", new ThriftEncoder());
         pipeline.addLast("decoder", new ThriftDecoder(thriftMessageMaxSizeBytes));
-                if (authMethod == AuthMethod.DIGEST) {
-                    try {
-                        LOG.debug("Adding SaslStormServerHandler to pacemaker server pipeline.");
-                        pipeline.addLast(SASL_HANDLER, new SaslStormServerHandler((ISaslServer) server));
-                    } catch (IOException e) {
-                        throw new RuntimeException(e);
-                    }
-                } else if (authMethod == AuthMethod.KERBEROS) {
-                    try {
-                        LOG.debug("Adding KerberosSaslServerHandler to pacemaker server pipeline.");
+        if (authMethod == AuthMethod.DIGEST) {
+            try {
+                LOG.debug("Adding SaslStormServerHandler to pacemaker server pipeline.");
+                pipeline.addLast(SASL_HANDLER, new SaslStormServerHandler((ISaslServer) server));
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        } else if (authMethod == AuthMethod.KERBEROS) {
+            try {
+                LOG.debug("Adding KerberosSaslServerHandler to pacemaker server pipeline.");
                 ArrayList<String> authorizedUsers = new ArrayList<>(1);
-                        authorizedUsers.add((String) topoConf.get(DaemonConfig.NIMBUS_DAEMON_USER));
-                        pipeline.addLast(KERBEROS_HANDLER, new KerberosSaslServerHandler((ISaslServer) server,
-                                                                                         topoConf,
-                                                                                         ClientAuthUtils.LOGIN_CONTEXT_PACEMAKER_SERVER,
-                                                                                         authorizedUsers));
-                    } catch (IOException e) {
-                        throw new RuntimeException(e);
-                    }
-                } else if (authMethod == AuthMethod.NONE) {
-                    LOG.debug("Not authenticating any clients. AuthMethod is NONE");
-                }
+                authorizedUsers.add((String) topoConf.get(DaemonConfig.NIMBUS_DAEMON_USER));
+                pipeline.addLast(KERBEROS_HANDLER, new KerberosSaslServerHandler((ISaslServer) server,
+                                                                                 topoConf,
+                                                                                 ClientAuthUtils.LOGIN_CONTEXT_PACEMAKER_SERVER,
+                                                                                 authorizedUsers));
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        } else if (authMethod == AuthMethod.NONE) {
+            LOG.debug("Not authenticating any clients. AuthMethod is NONE");
+        }
 
-                pipeline.addLast("handler", new StormServerHandler(server));
+        pipeline.addLast("handler", new StormServerHandler(server));
     }
 
     public enum AuthMethod {
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/Cluster.java b/storm-server/src/main/java/org/apache/storm/scheduler/Cluster.java
index 0f09aa7..04f0960 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/Cluster.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/Cluster.java
@@ -54,8 +54,6 @@
  */
 public class Cluster implements ISchedulingState {
     private static final Logger LOG = LoggerFactory.getLogger(Cluster.class);
-    private static final Function<String, Set<WorkerSlot>> MAKE_SET = (x) -> new HashSet<>();
-    private static final Function<String, Map<WorkerSlot, NormalizedResourceRequest>> MAKE_MAP = (x) -> new HashMap<>();
 
     /**
      * key: supervisor id, value: supervisor details.
@@ -80,6 +78,7 @@
     private final Map<String, Object> conf;
     private final Topologies topologies;
     private final Map<String, Map<WorkerSlot, NormalizedResourceRequest>> nodeToScheduledResourcesCache;
+    private final Map<String, Map<String, Double>> nodeToScheduledOffHeapNodeMemoryCache;   // node -> topologyId -> double
     private final Map<String, Set<WorkerSlot>> nodeToUsedSlotsCache;
     private final Map<String, NormalizedResourceRequest> totalResourcesPerNodeCache = new HashMap<>();
     private final ResourceMetrics resourceMetrics;
@@ -88,6 +87,14 @@
     private INimbus inimbus;
     private double minWorkerCpu = 0.0;
 
+    private static <K, V> Map<K, V> makeMap(String key) {
+        return new HashMap<>();
+    }
+
+    private static <K> Set<K> makeSet(String key) {
+        return new HashSet<>();
+    }
+
     public Cluster(
         INimbus nimbus,
         ResourceMetrics resourceMetrics,
@@ -148,6 +155,7 @@
         this.resourceMetrics = resourceMetrics;
         this.supervisors.putAll(supervisors);
         this.nodeToScheduledResourcesCache = new HashMap<>(this.supervisors.size());
+        this.nodeToScheduledOffHeapNodeMemoryCache = new HashMap<>();
         this.nodeToUsedSlotsCache = new HashMap<>(this.supervisors.size());
 
         for (Map.Entry<String, SupervisorDetails> entry : supervisors.entrySet()) {
@@ -359,7 +367,7 @@
 
     @Override
     public Set<Integer> getUsedPorts(SupervisorDetails supervisor) {
-        return nodeToUsedSlotsCache.computeIfAbsent(supervisor.getId(), MAKE_SET)
+        return nodeToUsedSlotsCache.computeIfAbsent(supervisor.getId(), Cluster::makeSet)
             .stream()
             .map(WorkerSlot::getPort)
             .collect(Collectors.toSet());
@@ -504,7 +512,7 @@
         }
         for (SharedMemory shared : td.getSharedMemoryRequests(executors)) {
             totalResources.addOffHeap(shared.get_off_heap_worker());
-            totalResources.addOnHeap(shared.get_off_heap_worker());
+            totalResources.addOnHeap(shared.get_on_heap());
 
             addResource(
                 sharedTotalResources,
@@ -556,11 +564,7 @@
         }
 
         double currentTotal = 0.0;
-        double afterTotal = 0.0;
-        double afterOnHeap = 0.0;
-
         double currentCpuTotal = 0.0;
-        double afterCpuTotal = 0.0;
 
         Set<ExecutorDetails> wouldBeAssigned = new HashSet<>();
         wouldBeAssigned.add(exec);
@@ -574,18 +578,17 @@
                 currentTotal = wrCurrent.get_mem_off_heap() + wrCurrent.get_mem_on_heap();
                 currentCpuTotal = wrCurrent.get_cpu();
             }
-            WorkerResources wrAfter = calculateWorkerResources(td, wouldBeAssigned);
-            afterTotal = wrAfter.get_mem_off_heap() + wrAfter.get_mem_on_heap();
-            afterOnHeap = wrAfter.get_mem_on_heap();
 
-            currentTotal += calculateSharedOffHeapMemory(ws.getNodeId(), assignment);
-            afterTotal += calculateSharedOffHeapMemory(ws.getNodeId(), assignment, exec);
-            afterCpuTotal = wrAfter.get_cpu();
-        } else {
-            WorkerResources wrAfter = calculateWorkerResources(td, wouldBeAssigned);
-            afterCpuTotal = wrAfter.get_cpu();
+            currentTotal += calculateSharedOffHeapNodeMemory(ws.getNodeId(), assignment, td);
         }
 
+        WorkerResources wrAfter = calculateWorkerResources(td, wouldBeAssigned);
+        double afterTotal = wrAfter.get_mem_off_heap() + wrAfter.get_mem_on_heap();
+        afterTotal += calculateSharedOffHeapNodeMemory(ws.getNodeId(), assignment, td, exec);
+
+        double afterOnHeap = wrAfter.get_mem_on_heap();
+        double afterCpuTotal = wrAfter.get_cpu();
+
         double cpuAdded = afterCpuTotal - currentCpuTotal;
         double cpuAvailable = resourcesAvailable.getTotalCpu();
 
@@ -673,9 +676,9 @@
 
         assignment.assign(slot, executors, resources);
         String nodeId = slot.getNodeId();
-        double sharedOffHeapMemory = calculateSharedOffHeapMemory(nodeId, assignment);
-        assignment.setTotalSharedOffHeapMemory(nodeId, sharedOffHeapMemory);
-        updateCachesForWorkerSlot(slot, resources, sharedOffHeapMemory);
+        double sharedOffHeapNodeMemory = calculateSharedOffHeapNodeMemory(nodeId, assignment, td);
+        assignment.setTotalSharedOffHeapNodeMemory(nodeId, sharedOffHeapNodeMemory);
+        updateCachesForWorkerSlot(slot, resources, topologyId, sharedOffHeapNodeMemory);
         totalResourcesPerNodeCache.remove(slot.getNodeId());
     }
 
@@ -700,31 +703,32 @@
     }
 
     /**
-     * Calculate the amount of shared off heap memory on a given nodes with the given assignment.
+     * Calculate the amount of shared off heap node memory on a given node with the given assignment.
      *
      * @param nodeId     the id of the node
      * @param assignment the current assignment
-     * @return the amount of shared off heap memory for that node in MB
+     * @param td         the topology details
+     * @return the amount of shared off heap node memory for that node in MB
      */
-    private double calculateSharedOffHeapMemory(String nodeId, SchedulerAssignmentImpl assignment) {
-        return calculateSharedOffHeapMemory(nodeId, assignment, null);
+    private double calculateSharedOffHeapNodeMemory(String nodeId, SchedulerAssignmentImpl assignment, TopologyDetails td) {
+        return calculateSharedOffHeapNodeMemory(nodeId, assignment, td, null);
     }
 
-    private double calculateSharedOffHeapMemory(
-        String nodeId, SchedulerAssignmentImpl assignment, ExecutorDetails extra) {
-        double memorySharedWithinNode = 0.0;
-        TopologyDetails td = topologies.getById(assignment.getTopologyId());
+    private double calculateSharedOffHeapNodeMemory(
+        String nodeId, SchedulerAssignmentImpl assignment, TopologyDetails td, ExecutorDetails extra) {
         Set<ExecutorDetails> executorsOnNode = new HashSet<>();
-        for (Entry<WorkerSlot, Collection<ExecutorDetails>> entry :
-            assignment.getSlotToExecutors().entrySet()) {
-            if (nodeId.equals(entry.getKey().getNodeId())) {
-                executorsOnNode.addAll(entry.getValue());
+        if (assignment != null) {
+            for (Entry<WorkerSlot, Collection<ExecutorDetails>> entry : assignment.getSlotToExecutors().entrySet()) {
+                if (nodeId.equals(entry.getKey().getNodeId())) {
+                    executorsOnNode.addAll(entry.getValue());
+                }
             }
         }
         if (extra != null) {
             executorsOnNode.add(extra);
         }
         //Now check for overlap on the node
+        double memorySharedWithinNode = 0.0;
         for (SharedMemory shared : td.getSharedMemoryRequests(executorsOnNode)) {
             memorySharedWithinNode += shared.get_off_heap_node();
         }
@@ -743,10 +747,11 @@
                 assertValidTopologyForModification(assignment.getTopologyId());
                 assignment.unassignBySlot(slot);
                 String nodeId = slot.getNodeId();
-                assignment.setTotalSharedOffHeapMemory(
-                    nodeId, calculateSharedOffHeapMemory(nodeId, assignment));
-                nodeToScheduledResourcesCache.computeIfAbsent(nodeId, MAKE_MAP).put(slot, new NormalizedResourceRequest());
-                nodeToUsedSlotsCache.computeIfAbsent(nodeId, MAKE_SET).remove(slot);
+                TopologyDetails td = topologies.getById(assignment.getTopologyId());
+                assignment.setTotalSharedOffHeapNodeMemory(
+                    nodeId, calculateSharedOffHeapNodeMemory(nodeId, assignment, td));
+                nodeToScheduledResourcesCache.computeIfAbsent(nodeId, Cluster::makeMap).put(slot, new NormalizedResourceRequest());
+                nodeToUsedSlotsCache.computeIfAbsent(nodeId, Cluster::makeSet).remove(slot);
             }
         }
         //Invalidate the cache as something on the node changed
@@ -768,7 +773,7 @@
 
     @Override
     public boolean isSlotOccupied(WorkerSlot slot) {
-        return nodeToUsedSlotsCache.computeIfAbsent(slot.getNodeId(), MAKE_SET).contains(slot);
+        return nodeToUsedSlotsCache.computeIfAbsent(slot.getNodeId(), Cluster::makeSet).contains(slot);
     }
 
     @Override
@@ -963,7 +968,7 @@
                 sr = sr.add(entry.getValue());
                 ret.put(id, sr);
             }
-            Map<String, Double> nodeIdToSharedOffHeap = assignment.getNodeIdToTotalSharedOffHeapMemory();
+            Map<String, Double> nodeIdToSharedOffHeap = assignment.getNodeIdToTotalSharedOffHeapNodeMemory();
             if (nodeIdToSharedOffHeap != null) {
                 for (Entry<String, Double> entry : nodeIdToSharedOffHeap.entrySet()) {
                     String id = entry.getKey();
@@ -1003,13 +1008,14 @@
     /**
      * This method updates ScheduledResources and UsedSlots cache for given workerSlot.
      */
-    private void updateCachesForWorkerSlot(WorkerSlot workerSlot, WorkerResources workerResources, Double sharedoffHeapMemory) {
+    private void updateCachesForWorkerSlot(WorkerSlot workerSlot, WorkerResources workerResources, String topologyId,
+                                           Double sharedOffHeapNodeMemory) {
         String nodeId = workerSlot.getNodeId();
         NormalizedResourceRequest normalizedResourceRequest = new NormalizedResourceRequest();
         normalizedResourceRequest.add(workerResources);
-        normalizedResourceRequest.addOffHeap(sharedoffHeapMemory);
-        nodeToScheduledResourcesCache.computeIfAbsent(nodeId, MAKE_MAP).put(workerSlot, normalizedResourceRequest);
-        nodeToUsedSlotsCache.computeIfAbsent(nodeId, MAKE_SET).add(workerSlot);
+        nodeToScheduledResourcesCache.computeIfAbsent(nodeId, Cluster::makeMap).put(workerSlot, normalizedResourceRequest);
+        nodeToScheduledOffHeapNodeMemoryCache.computeIfAbsent(nodeId, Cluster::makeMap).put(topologyId, sharedOffHeapNodeMemory);
+        nodeToUsedSlotsCache.computeIfAbsent(nodeId, Cluster::makeSet).add(workerSlot);
     }
 
     public ResourceMetrics getResourceMetrics() {
@@ -1019,10 +1025,17 @@
     @Override
     public NormalizedResourceRequest getAllScheduledResourcesForNode(String nodeId) {
         return totalResourcesPerNodeCache.computeIfAbsent(nodeId, (nid) -> {
+            // executor resources
             NormalizedResourceRequest totalScheduledResources = new NormalizedResourceRequest();
-            for (NormalizedResourceRequest req : nodeToScheduledResourcesCache.computeIfAbsent(nodeId, MAKE_MAP).values()) {
+            for (NormalizedResourceRequest req : nodeToScheduledResourcesCache.computeIfAbsent(nodeId, Cluster::makeMap).values()) {
                 totalScheduledResources.add(req);
             }
+            // shared off heap node memory
+            for (Double offHeapNodeMemory
+                    : nodeToScheduledOffHeapNodeMemoryCache.computeIfAbsent(nid, Cluster::makeMap).values()) {
+                totalScheduledResources.addOffHeap(offHeapNodeMemory);
+            }
+
             return totalScheduledResources;
         });
     }
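
The switch above from shared `Function` constants to `Cluster::makeMap` and `Cluster::makeSet` works because a generic static method can infer a different value type at each call site, which a single static field cannot. A minimal illustration (names are hypothetical):

```java
import java.util.HashMap;
import java.util.Map;

class MakeMapSketch {
    private static <K, V> Map<K, V> makeMap(Object ignoredKey) {
        return new HashMap<>();
    }

    public static void main(String[] args) {
        Map<String, Map<Integer, Double>> slotMemory = new HashMap<>();
        Map<String, Map<String, String>> topoOwners = new HashMap<>();
        // the same helper infers a different value type at each use site
        slotMemory.computeIfAbsent("node-1", MakeMapSketch::makeMap).put(6700, 128.0);
        topoOwners.computeIfAbsent("node-1", MakeMapSketch::makeMap).put("topo-1", "alice");
        System.out.println(slotMemory + " " + topoOwners);
    }
}
```
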
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/ISupervisor.java b/storm-server/src/main/java/org/apache/storm/scheduler/ISupervisor.java
index 08d1229..7cb1dd6 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/ISupervisor.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/ISupervisor.java
@@ -16,6 +16,7 @@
 import java.util.Map;
 
 public interface ISupervisor {
+
     void prepare(Map<String, Object> topoConf, String schedulerLocalDir);
 
     // for mesos, this is {hostname}-{topologyid}
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/IsolationScheduler.java b/storm-server/src/main/java/org/apache/storm/scheduler/IsolationScheduler.java
index 03c0c6a..85e752b 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/IsolationScheduler.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/IsolationScheduler.java
@@ -40,7 +40,7 @@
 // blacklist the good hosts and remove those workers from the list of workers that need to be assigned
 // otherwise unassign all other workers for isolated topologies if assigned
 public class IsolationScheduler implements IScheduler {
-    private final static Logger LOG = LoggerFactory.getLogger(IsolationScheduler.class);
+    private static final Logger LOG = LoggerFactory.getLogger(IsolationScheduler.class);
 
     private Map<String, Number> isoMachines;
 
 
@@ -218,8 +218,8 @@
 
         List<ExecutorDetails> allExecutors = new ArrayList<ExecutorDetails>();
         Collection<List<ExecutorDetails>> values = compExecutors.values();
-        for (List<ExecutorDetails> eList : values) {
-            allExecutors.addAll(eList);
+        for (List<ExecutorDetails> value : values) {
+            allExecutors.addAll(value);
         }
 
         int numWorkers = topology.getNumWorkers();
@@ -377,7 +377,7 @@
         private String topologyId;
         private Set<ExecutorDetails> executors;
 
-        public AssignmentInfo(WorkerSlot workerSlot, String topologyId, Set<ExecutorDetails> executors) {
+        AssignmentInfo(WorkerSlot workerSlot, String topologyId, Set<ExecutorDetails> executors) {
             this.workerSlot = workerSlot;
             this.topologyId = topologyId;
             this.executors = executors;
@@ -401,7 +401,7 @@
         private String hostName;
         private List<WorkerSlot> workerSlots;
 
-        public HostAssignableSlots(String hostName, List<WorkerSlot> workerSlots) {
+        HostAssignableSlots(String hostName, List<WorkerSlot> workerSlots) {
             this.hostName = hostName;
             this.workerSlots = workerSlots;
         }
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/SchedulerAssignment.java b/storm-server/src/main/java/org/apache/storm/scheduler/SchedulerAssignment.java
index 347c95f..bf4815e 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/SchedulerAssignment.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/SchedulerAssignment.java
@@ -30,7 +30,7 @@
      * @param slot the slot to check.
      * @return true if the slot is occupied by this assignment else false.
      */
-    public boolean isSlotOccupied(WorkerSlot slot);
+    boolean isSlotOccupied(WorkerSlot slot);
 
     /**
      * Is the executor assigned or not.
@@ -38,54 +38,54 @@
      * @param executor the executor to check it if is assigned.
      * @return true if it is assigned else false
      */
-    public boolean isExecutorAssigned(ExecutorDetails executor);
+    boolean isExecutorAssigned(ExecutorDetails executor);
 
     /**
      * Return the ID of the topology.
      *
      * @return the topology-id this assignment is for.
      */
-    public String getTopologyId();
+    String getTopologyId();
 
     /**
      * Get the map of executor to WorkerSlot.
      *
      * @return the executor -> slot map.
      */
-    public Map<ExecutorDetails, WorkerSlot> getExecutorToSlot();
+    Map<ExecutorDetails, WorkerSlot> getExecutorToSlot();
 
     /**
      * Get the set of all executors.
      *
      * @return the executors covered by this assignments
      */
-    public Set<ExecutorDetails> getExecutors();
+    Set<ExecutorDetails> getExecutors();
 
     /**
      * Get the set of all slots that are a part of this.
      *
      * @return the set of all slots.
      */
-    public Set<WorkerSlot> getSlots();
+    Set<WorkerSlot> getSlots();
 
     /**
      * Get the mapping of slot to executors on that slot.
      *
      * @return the slot to the executors assigned to that slot.
      */
-    public Map<WorkerSlot, Collection<ExecutorDetails>> getSlotToExecutors();
+    Map<WorkerSlot, Collection<ExecutorDetails>> getSlotToExecutors();
 
     /**
      * Get the slot to resource mapping.
      *
      * @return The slot to resource mapping
      */
-    public Map<WorkerSlot, WorkerResources> getScheduledResources();
+    Map<WorkerSlot, WorkerResources> getScheduledResources();
 
     /**
-     * Get the total shared off heap memory mapping.
+     * Get the total shared off heap node memory mapping.
      *
-     * @return host to total shared off heap memory mapping.
+     * @return host to total shared off heap node memory mapping.
      */
-    public Map<String, Double> getNodeIdToTotalSharedOffHeapMemory();
+    Map<String, Double> getNodeIdToTotalSharedOffHeapNodeMemory();
 }
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/SchedulerAssignmentImpl.java b/storm-server/src/main/java/org/apache/storm/scheduler/SchedulerAssignmentImpl.java
index 077dafe..ee41630 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/SchedulerAssignmentImpl.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/SchedulerAssignmentImpl.java
@@ -18,12 +18,10 @@
 
 package org.apache.storm.scheduler;
 
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.function.Function;
@@ -45,7 +43,7 @@
      */
     private final Map<ExecutorDetails, WorkerSlot> executorToSlot = new HashMap<>();
     private final Map<WorkerSlot, WorkerResources> resources = new HashMap<>();
-    private final Map<String, Double> nodeIdToTotalSharedOffHeap = new HashMap<>();
+    private final Map<String, Double> nodeIdToTotalSharedOffHeapNode = new HashMap<>();
     private final Map<WorkerSlot, Collection<ExecutorDetails>> slotToExecutors = new HashMap<>();
 
     /**
@@ -78,7 +76,7 @@
             if (nodeIdToTotalSharedOffHeap.entrySet().stream().anyMatch((entry) -> entry.getKey() == null || entry.getValue() == null)) {
                 throw new RuntimeException("Cannot create off heap with a null in it " + nodeIdToTotalSharedOffHeap);
             }
-            this.nodeIdToTotalSharedOffHeap.putAll(nodeIdToTotalSharedOffHeap);
+            this.nodeIdToTotalSharedOffHeapNode.putAll(nodeIdToTotalSharedOffHeap);
         }
     }
 
@@ -88,7 +86,7 @@
 
     public SchedulerAssignmentImpl(SchedulerAssignment assignment) {
         this(assignment.getTopologyId(), assignment.getExecutorToSlot(),
-             assignment.getScheduledResources(), assignment.getNodeIdToTotalSharedOffHeapMemory());
+             assignment.getScheduledResources(), assignment.getNodeIdToTotalSharedOffHeapNodeMemory());
     }
 
     @Override
@@ -132,7 +130,7 @@
         SchedulerAssignmentImpl o = (SchedulerAssignmentImpl) other;
 
         return resources.equals(o.resources)
-               && nodeIdToTotalSharedOffHeap.equals(o.nodeIdToTotalSharedOffHeap);
+               && nodeIdToTotalSharedOffHeapNode.equals(o.nodeIdToTotalSharedOffHeapNode);
     }
 
     @Override
@@ -186,7 +184,7 @@
             }
         }
         if (!isFound) {
-            nodeIdToTotalSharedOffHeap.remove(node);
+            nodeIdToTotalSharedOffHeapNode.remove(node);
         }
     }
 
@@ -225,12 +223,12 @@
         return resources;
     }
 
-    public void setTotalSharedOffHeapMemory(String node, double value) {
-        nodeIdToTotalSharedOffHeap.put(node, value);
+    public void setTotalSharedOffHeapNodeMemory(String node, double value) {
+        nodeIdToTotalSharedOffHeapNode.put(node, value);
     }
 
     @Override
-    public Map<String, Double> getNodeIdToTotalSharedOffHeapMemory() {
-        return nodeIdToTotalSharedOffHeap;
+    public Map<String, Double> getNodeIdToTotalSharedOffHeapNodeMemory() {
+        return nodeIdToTotalSharedOffHeapNode;
     }
 }
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/Topologies.java b/storm-server/src/main/java/org/apache/storm/scheduler/Topologies.java
index b222916..d135afb 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/Topologies.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/Topologies.java
@@ -41,9 +41,9 @@
     }
 
     /**
-     * Create a new Topologies from a map of id to topology
+     * Create a new Topologies from a map of id to topology.
      *
-     * @param topologies a map of topology id to topology details.
+     * @param topologies a map of topology id to topology details
      */
     public Topologies(Map<String, TopologyDetails> topologies) {
         if (topologies == null) {
@@ -81,10 +81,10 @@
     }
 
     /**
-     * Get a topology given an ID
+     * Get a topology given an ID.
      *
      * @param topologyId the id of the topology to get
-     * @return the topology or null if it is not found.
+     * @return the topology or null if it is not found
      */
     public TopologyDetails getById(String topologyId) {
         return topologies.get(topologyId);
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/TopologyDetails.java b/storm-server/src/main/java/org/apache/storm/scheduler/TopologyDetails.java
index 757f256..660f3d8 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/TopologyDetails.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/TopologyDetails.java
@@ -352,8 +352,7 @@
     /**
      * Get the total CPU requirement for executor.
      *
-     * @param exec
-     * @return Map<String   ,       Double> generic resource mapping requirement for the executor
+     * @return generic resource mapping requirement for the executor
      */
     public Double getTotalCpuReqTask(ExecutorDetails exec) {
         if (hasExecInTopo(exec)) {
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/BlacklistScheduler.java b/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/BlacklistScheduler.java
index 54c10f9..0527335 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/BlacklistScheduler.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/BlacklistScheduler.java
@@ -13,6 +13,7 @@
 package org.apache.storm.scheduler.blacklist;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -53,7 +54,8 @@
    //key is supervisor key, value is supervisor ports
     protected EvictingQueue<HashMap<String, Set<Integer>>> badSupervisorsToleranceSlidingWindow;
     protected int windowSize;
-    protected Set<String> blacklistHost;
+    protected volatile Set<String> blacklistedSupervisorIds;     // supervisor ids
+    private boolean blacklistOnBadSlots;
     private Map<String, Object> conf;
 
     public BlacklistScheduler(IScheduler underlyingScheduler, StormMetricsRegistry metricsRegistry) {
@@ -88,10 +90,13 @@
         windowSize = toleranceTime / nimbusMonitorFreqSecs;
         badSupervisorsToleranceSlidingWindow = EvictingQueue.create(windowSize);
         cachedSupervisors = new HashMap<>();
-        blacklistHost = new HashSet<>();
+        blacklistedSupervisorIds = new HashSet<>();
+        blacklistOnBadSlots = ObjectReader.getBoolean(
+                this.conf.get(DaemonConfig.BLACKLIST_SCHEDULER_ASSUME_SUPERVISOR_BAD_BASED_ON_BAD_SLOT),
+                true);
 
         //nimbus:num-blacklisted-supervisor + non-blacklisted supervisor = nimbus:num-supervisors
-        metricsRegistry.registerGauge("nimbus:num-blacklisted-supervisor", () -> blacklistHost.size());
+        metricsRegistry.registerGauge("nimbus:num-blacklisted-supervisor", () -> blacklistedSupervisorIds.size());
     }
 
     @Override
@@ -110,8 +115,8 @@
         Map<String, SupervisorDetails> supervisors = cluster.getSupervisors();
         blacklistStrategy.resumeFromBlacklist();
         badSupervisors(supervisors);
-        Set<String> blacklistHosts = getBlacklistHosts(cluster, topologies);
-        this.blacklistHost = blacklistHosts;
+        blacklistedSupervisorIds = refreshBlacklistedSupervisorIds(cluster, topologies);
+        Set<String> blacklistHosts = getBlacklistHosts(cluster, blacklistedSupervisorIds);
         cluster.setBlacklistedHosts(blacklistHosts);
         removeLongTimeDisappearFromCache();
 
@@ -132,20 +137,20 @@
         for (String key : badSupervisorKeys) {
             badSupervisors.put(key, cachedSupervisors.get(key));
         }
-
         for (Map.Entry<String, SupervisorDetails> entry : supervisors.entrySet()) {
             String key = entry.getKey();
             SupervisorDetails supervisorDetails = entry.getValue();
             if (cachedSupervisors.containsKey(key)) {
-                Set<Integer> badSlots = badSlots(supervisorDetails, key);
-                if (badSlots.size() > 0) { //supervisor contains bad slots
-                    badSupervisors.put(key, badSlots);
+                if (blacklistOnBadSlots) {
+                    Set<Integer> badSlots = badSlots(supervisorDetails, key);
+                    if (badSlots.size() > 0) { //supervisor contains bad slots
+                        badSupervisors.put(key, badSlots);
+                    }
                 }
             } else {
                 cachedSupervisors.put(key, supervisorDetails.getAllPorts()); //new supervisor to cache
             }
         }
-
         badSupervisorsToleranceSlidingWindow.add(badSupervisors);
     }
 
@@ -160,16 +165,20 @@
             allPorts.addAll(cachedSupervisorPorts);
             cachedSupervisors.put(supervisorKey, allPorts);
         }
-
         Set<Integer> badSlots = Sets.difference(cachedSupervisorPorts, supervisorPorts);
         return badSlots;
     }
 
-    private Set<String> getBlacklistHosts(Cluster cluster, Topologies topologies) {
-        Set<String> blacklistSet = blacklistStrategy.getBlacklist(new ArrayList<>(badSupervisorsToleranceSlidingWindow),
-                                                                  cluster, topologies);
+    private Set<String> refreshBlacklistedSupervisorIds(Cluster cluster, Topologies topologies) {
+        Set<String> blacklistedSupervisors = blacklistStrategy.getBlacklist(new ArrayList<>(badSupervisorsToleranceSlidingWindow),
+                cluster, topologies);
+        LOG.info("Supervisors {} are blacklisted.", blacklistedSupervisors);
+        return blacklistedSupervisors;
+    }
+
+    private Set<String> getBlacklistHosts(Cluster cluster, Set<String> blacklistIds) {
         Set<String> blacklistHostSet = new HashSet<>();
-        for (String supervisor : blacklistSet) {
+        for (String supervisor : blacklistIds) {
             String host = cluster.getHost(supervisor);
             if (host != null) {
                 blacklistHostSet.add(host);
@@ -193,7 +202,8 @@
             for (String supervisor : supervisors) {
                 int supervisorCount = supervisorCountMap.getOrDefault(supervisor, 0);
                 Set<Integer> slots = item.get(supervisor);
-                if (slots.equals(cachedSupervisors.get(supervisor))) { // treat supervisor as bad only if all of its slots matched the cached supervisor
+                // treat supervisor as bad only if all of its slots matched the cached supervisor
+                if (slots.equals(cachedSupervisors.get(supervisor))) {
                     // track how many times a cached supervisor has been marked bad
                     supervisorCountMap.put(supervisor, supervisorCount + 1);
                 }
@@ -251,4 +261,8 @@
             throw e;
         }
     }
-}
\ No newline at end of file
+
+    public Set<String> getBlacklistSupervisorIds() {
+        return Collections.unmodifiableSet(blacklistedSupervisorIds);
+    }
+}
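
The blacklist is now published wholesale: each scheduling pass rebuilds the set, installs it with a single write to the volatile blacklistedSupervisorIds reference, and getBlacklistSupervisorIds() hands out an unmodifiable view, so readers such as the metrics gauge always observe a complete, consistent snapshot without locking. A minimal sketch of that publish-by-reference pattern, with illustrative names rather than Storm APIs:

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;

    // Minimal sketch; SnapshotPublisher and its members are illustrative names.
    public class SnapshotPublisher {
        // Writers never mutate the published set in place; each round builds a
        // fresh set and swaps the reference, so readers need no locking.
        private volatile Set<String> blacklisted = new HashSet<>();

        // Called from the single scheduler thread once per scheduling round.
        public void refresh(Set<String> freshlyComputed) {
            blacklisted = new HashSet<>(freshlyComputed); // one atomic reference write
        }

        // Safe from any thread (e.g. a metrics gauge); callers cannot mutate it.
        public Set<String> current() {
            return Collections.unmodifiableSet(blacklisted);
        }
    }
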
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/strategies/RasBlacklistStrategy.java b/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/strategies/RasBlacklistStrategy.java
index 574fe91..4273f79 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/strategies/RasBlacklistStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/blacklist/strategies/RasBlacklistStrategy.java
@@ -54,7 +54,7 @@
                 if (cluster.needsSchedulingRas(td)) {
                     int slots = 0;
                     try {
-                        slots = ServerUtils.getEstimatedWorkerCountForRASTopo(td.getConf(), td.getTopology());
+                        slots = ServerUtils.getEstimatedWorkerCountForRasTopo(td.getConf(), td.getTopology());
                     } catch (InvalidTopologyException e) {
                         LOG.warn("Could not guess the number of slots needed for {}", td.getName(), e);
                     }
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/DefaultPool.java b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/DefaultPool.java
index 1b6ac91..05ccfb7 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/DefaultPool.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/DefaultPool.java
@@ -25,23 +25,23 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * A pool of machines that anyone can use, but topologies are not isolated
+ * A pool of machines that anyone can use, but topologies are not isolated.
  */
 public class DefaultPool extends NodePool {
     private static final Logger LOG = LoggerFactory.getLogger(DefaultPool.class);
-    private Set<Node> _nodes = new HashSet<>();
-    private HashMap<String, TopologyDetails> _tds = new HashMap<>();
+    private Set<Node> nodes = new HashSet<>();
+    private HashMap<String, TopologyDetails> tds = new HashMap<>();
 
     @Override
     public void addTopology(TopologyDetails td) {
         String topId = td.getId();
         LOG.debug("Adding in Topology {}", topId);
-        _tds.put(topId, td);
-        SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
+        tds.put(topId, td);
+        SchedulerAssignment assignment = cluster.getAssignmentById(topId);
         if (assignment != null) {
             for (WorkerSlot ws : assignment.getSlots()) {
-                Node n = _nodeIdToNode.get(ws.getNodeId());
-                _nodes.add(n);
+                Node n = nodeIdToNode.get(ws.getNodeId());
+                nodes.add(n);
             }
         }
     }
@@ -54,15 +54,15 @@
     @Override
     public Collection<Node> takeNodes(int nodesNeeded) {
         HashSet<Node> ret = new HashSet<>();
-        LinkedList<Node> sortedNodes = new LinkedList<>(_nodes);
+        LinkedList<Node> sortedNodes = new LinkedList<>(nodes);
         Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
         for (Node n : sortedNodes) {
             if (nodesNeeded <= ret.size()) {
                 break;
             }
             if (n.isAlive()) {
-                n.freeAllSlots(_cluster);
-                _nodes.remove(n);
+                n.freeAllSlots(cluster);
+                nodes.remove(n);
                 ret.add(n);
             }
         }
@@ -72,22 +72,24 @@
     @Override
     public int nodesAvailable() {
         int total = 0;
-        for (Node n : _nodes) {
-            if (n.isAlive()) total++;
+        for (Node n : nodes) {
+            if (n.isAlive()) {
+                total++;
+            }
         }
         return total;
     }
 
     @Override
     public int slotsAvailable() {
-        return Node.countTotalSlotsAlive(_nodes);
+        return Node.countTotalSlotsAlive(nodes);
     }
 
     @Override
     public NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slotsNeeded) {
         int nodesFound = 0;
         int slotsFound = 0;
-        LinkedList<Node> sortedNodes = new LinkedList<>(_nodes);
+        LinkedList<Node> sortedNodes = new LinkedList<>(nodes);
         Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
         for (Node n : sortedNodes) {
             if (slotsNeeded <= 0) {
@@ -106,15 +108,15 @@
     @Override
     public Collection<Node> takeNodesBySlots(int slotsNeeded) {
         HashSet<Node> ret = new HashSet<>();
-        LinkedList<Node> sortedNodes = new LinkedList<>(_nodes);
+        LinkedList<Node> sortedNodes = new LinkedList<>(nodes);
         Collections.sort(sortedNodes, Node.FREE_NODE_COMPARATOR_DEC);
         for (Node n : sortedNodes) {
             if (slotsNeeded <= 0) {
                 break;
             }
             if (n.isAlive()) {
-                n.freeAllSlots(_cluster);
-                _nodes.remove(n);
+                n.freeAllSlots(cluster);
+                nodes.remove(n);
                 ret.add(n);
                 slotsNeeded -= n.totalSlotsFree();
             }
@@ -124,35 +126,38 @@
 
     @Override
     public void scheduleAsNeeded(NodePool... lesserPools) {
-        for (TopologyDetails td : _tds.values()) {
+        for (TopologyDetails td : tds.values()) {
             String topId = td.getId();
-            if (_cluster.needsScheduling(td)) {
+            if (cluster.needsScheduling(td)) {
                 LOG.debug("Scheduling topology {}", topId);
                 int totalTasks = td.getExecutors().size();
                 int origRequest = td.getNumWorkers();
                 int slotsRequested = Math.min(totalTasks, origRequest);
-                int slotsUsed = Node.countSlotsUsed(topId, _nodes);
-                int slotsFree = Node.countFreeSlotsAlive(_nodes);
+                int slotsUsed = Node.countSlotsUsed(topId, nodes);
+                int slotsFree = Node.countFreeSlotsAlive(nodes);
                 //Check to see if we have enough slots before trying to get them
                 int slotsAvailable = 0;
                 if (slotsRequested > slotsFree) {
                     slotsAvailable = NodePool.slotsAvailable(lesserPools);
                 }
                 int slotsToUse = Math.min(slotsRequested - slotsUsed, slotsFree + slotsAvailable);
-                int executorsNotRunning = _cluster.getUnassignedExecutors(td).size();
+                int executorsNotRunning = cluster.getUnassignedExecutors(td).size();
                 LOG.debug("Slots... requested {} used {} free {} available {} to be used {}, executors not running {}",
                           slotsRequested, slotsUsed, slotsFree, slotsAvailable, slotsToUse, executorsNotRunning);
                 if (slotsToUse <= 0) {
                     if (executorsNotRunning > 0) {
-                        _cluster.setStatus(topId, "Not fully scheduled (No free slots in default pool) " + executorsNotRunning +
-                                                  " executors not scheduled");
+                        cluster.setStatus(topId, "Not fully scheduled (No free slots in default pool) "
+                                + executorsNotRunning
+                                + " executors not scheduled");
                     } else {
                         if (slotsUsed < slotsRequested) {
-                            _cluster.setStatus(topId, "Running with fewer slots than requested (" + slotsUsed + "/" + origRequest + ")");
+                            cluster.setStatus(topId, "Running with fewer slots than requested ("
+                                    + slotsUsed + "/"
+                                    + origRequest + ")");
                         } else { //slotsUsed < origRequest
-                            _cluster.setStatus(topId,
-                                               "Fully Scheduled (requested " + origRequest + " slots, but could only use " + slotsUsed +
-                                               ")");
+                            cluster.setStatus(topId,
+                                    "Fully Scheduled (requested " + origRequest
+                                            + " slots, but could only use " + slotsUsed + ")");
                         }
                     }
                     continue;
@@ -160,28 +165,28 @@
 
                 int slotsNeeded = slotsToUse - slotsFree;
                 if (slotsNeeded > 0) {
-                    _nodes.addAll(NodePool.takeNodesBySlot(slotsNeeded, lesserPools));
+                    nodes.addAll(NodePool.takeNodesBySlot(slotsNeeded, lesserPools));
                 }
 
                 if (executorsNotRunning <= 0) {
                     //There are free slots that we can take advantage of now.
-                    for (Node n : _nodes) {
-                        n.freeTopology(topId, _cluster);
+                    for (Node n : nodes) {
+                        n.freeTopology(topId, cluster);
                     }
-                    slotsFree = Node.countFreeSlotsAlive(_nodes);
+                    slotsFree = Node.countFreeSlotsAlive(nodes);
                     slotsToUse = Math.min(slotsRequested, slotsFree);
                 }
 
                 RoundRobinSlotScheduler slotSched =
-                    new RoundRobinSlotScheduler(td, slotsToUse, _cluster);
+                    new RoundRobinSlotScheduler(td, slotsToUse, cluster);
 
-                LinkedList<Node> nodes = new LinkedList<>(_nodes);
+                LinkedList<Node> nodes = new LinkedList<>(this.nodes);
                 while (true) {
                     Node n;
                     do {
                         if (nodes.isEmpty()) {
-                            throw new IllegalStateException("This should not happen, we" +
-                                                            " messed up and did not get enough slots");
+                            throw new IllegalStateException("This should not happen, we"
+                                    + " messed up and did not get enough slots");
                         }
                         n = nodes.peekFirst();
                         if (n.totalSlotsFree() == 0) {
@@ -193,24 +198,28 @@
                         break;
                     }
                 }
-                int afterSchedSlotsUsed = Node.countSlotsUsed(topId, _nodes);
+                int afterSchedSlotsUsed = Node.countSlotsUsed(topId, this.nodes);
                 if (afterSchedSlotsUsed < slotsRequested) {
-                    _cluster.setStatus(topId, "Running with fewer slots than requested (" + afterSchedSlotsUsed + "/" + origRequest + ")");
+                    cluster.setStatus(topId, "Running with fewer slots than requested ("
+                            + afterSchedSlotsUsed + "/" + origRequest + ")");
                 } else if (afterSchedSlotsUsed < origRequest) {
-                    _cluster.setStatus(topId,
-                                       "Fully Scheduled (requested " + origRequest + " slots, but could only use " + afterSchedSlotsUsed +
-                                       ")");
+                    cluster.setStatus(topId,
+                            "Fully Scheduled (requested "
+                                    + origRequest
+                                    + " slots, but could only use "
+                                    + afterSchedSlotsUsed
+                                    + ")");
                 } else {
-                    _cluster.setStatus(topId, "Fully Scheduled");
+                    cluster.setStatus(topId, "Fully Scheduled");
                 }
             } else {
-                _cluster.setStatus(topId, "Fully Scheduled");
+                cluster.setStatus(topId, "Fully Scheduled");
             }
         }
     }
 
     @Override
     public String toString() {
-        return "DefaultPool  " + _nodes.size() + " nodes " + _tds.size() + " topologies";
+        return "DefaultPool  " + nodes.size() + " nodes " + tds.size() + " topologies";
     }
 }
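
With the underscore prefixes gone, the local working list in scheduleAsNeeded shares the name of the nodes field, which is why the field reads in that method are now qualified as this.nodes. A small sketch of the shadowing rule, with illustrative names:

    import java.util.HashSet;
    import java.util.LinkedList;
    import java.util.Set;

    // Illustrative: a local variable shadows a same-named field; `this.` reaches the field.
    class ShadowingExample {
        private final Set<String> nodes = new HashSet<>();

        void schedule() {
            // Local snapshot for ordered traversal; shadows the field in this scope.
            LinkedList<String> nodes = new LinkedList<>(this.nodes);
            nodes.sort(null);            // natural order; refers to the local list
            this.nodes.remove("gone");   // qualification is required to reach the field
        }
    }
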
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/FreePool.java b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/FreePool.java
index e868223..867baa4 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/FreePool.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/FreePool.java
@@ -23,23 +23,23 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * All of the machines that currently have nothing assigned to them
+ * All of the machines that currently have nothing assigned to them.
  */
 public class FreePool extends NodePool {
     private static final Logger LOG = LoggerFactory.getLogger(FreePool.class);
-    private Set<Node> _nodes = new HashSet<>();
-    private int _totalSlots = 0;
+    private Set<Node> nodes = new HashSet<>();
+    private int totalSlots = 0;
 
     @Override
     public void init(Cluster cluster, Map<String, Node> nodeIdToNode) {
         super.init(cluster, nodeIdToNode);
         for (Node n : nodeIdToNode.values()) {
             if (n.isTotallyFree() && n.isAlive()) {
-                _nodes.add(n);
-                _totalSlots += n.totalSlotsFree();
+                nodes.add(n);
+                totalSlots += n.totalSlotsFree();
             }
         }
-        LOG.debug("Found {} nodes with {} slots", _nodes.size(), _totalSlots);
+        LOG.debug("Found {} nodes with {} slots", nodes.size(), totalSlots);
     }
 
     @Override
@@ -56,11 +56,11 @@
     @Override
     public Collection<Node> takeNodes(int nodesNeeded) {
         HashSet<Node> ret = new HashSet<>();
-        Iterator<Node> it = _nodes.iterator();
+        Iterator<Node> it = nodes.iterator();
         while (it.hasNext() && nodesNeeded > ret.size()) {
             Node n = it.next();
             ret.add(n);
-            _totalSlots -= n.totalSlotsFree();
+            totalSlots -= n.totalSlotsFree();
             it.remove();
         }
         return ret;
@@ -68,22 +68,22 @@
 
     @Override
     public int nodesAvailable() {
-        return _nodes.size();
+        return nodes.size();
     }
 
     @Override
     public int slotsAvailable() {
-        return _totalSlots;
+        return totalSlots;
     }
 
     @Override
     public Collection<Node> takeNodesBySlots(int slotsNeeded) {
         HashSet<Node> ret = new HashSet<>();
-        Iterator<Node> it = _nodes.iterator();
+        Iterator<Node> it = nodes.iterator();
         while (it.hasNext() && slotsNeeded > 0) {
             Node n = it.next();
             ret.add(n);
-            _totalSlots -= n.totalSlotsFree();
+            totalSlots -= n.totalSlotsFree();
             slotsNeeded -= n.totalSlotsFree();
             it.remove();
         }
@@ -94,7 +94,7 @@
     public NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slotsNeeded) {
         int slotsFound = 0;
         int nodesFound = 0;
-        Iterator<Node> it = _nodes.iterator();
+        Iterator<Node> it = nodes.iterator();
         while (it.hasNext() && slotsNeeded > 0) {
             Node n = it.next();
             nodesFound++;
@@ -112,6 +112,6 @@
 
     @Override
     public String toString() {
-        return "FreePool of " + _nodes.size() + " nodes with " + _totalSlots + " slots";
+        return "FreePool of " + nodes.size() + " nodes with " + totalSlots + " slots";
     }
 }
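
Both takeNodes and takeNodesBySlots drain the pool through an explicit Iterator so that elements can be removed mid-traversal; calling remove on the set inside a for-each loop would throw ConcurrentModificationException. A compact sketch of the idiom, with illustrative names:

    import java.util.HashSet;
    import java.util.Iterator;
    import java.util.Set;

    // Illustrative: structural removal during iteration must go through the Iterator.
    final class DrainExample {
        static Set<Integer> takeUpTo(Set<Integer> pool, int needed) {
            Set<Integer> taken = new HashSet<>();
            Iterator<Integer> it = pool.iterator();
            while (it.hasNext() && taken.size() < needed) {
                Integer n = it.next();
                taken.add(n);
                it.remove(); // safe here; pool.remove(n) would risk ConcurrentModificationException
            }
            return taken;
        }
    }
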
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/IsolatedPool.java b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/IsolatedPool.java
index 19ce712..77f9a3d 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/IsolatedPool.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/IsolatedPool.java
@@ -27,38 +27,38 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * A pool of machines that can be used to run isolated topologies
+ * A pool of machines that can be used to run isolated topologies.
  */
 public class IsolatedPool extends NodePool {
     private static final Logger LOG = LoggerFactory.getLogger(IsolatedPool.class);
-    private Map<String, Set<Node>> _topologyIdToNodes = new HashMap<>();
-    private HashMap<String, TopologyDetails> _tds = new HashMap<>();
-    private HashSet<String> _isolated = new HashSet<>();
-    private int _maxNodes;
-    private int _usedNodes;
+    private Map<String, Set<Node>> topologyIdToNodes = new HashMap<>();
+    private HashMap<String, TopologyDetails> tds = new HashMap<>();
+    private HashSet<String> isolated = new HashSet<>();
+    private int maxNodes;
+    private int usedNodes;
 
     public IsolatedPool(int maxNodes) {
-        _maxNodes = maxNodes;
-        _usedNodes = 0;
+        this.maxNodes = maxNodes;
+        usedNodes = 0;
     }
 
     @Override
     public void addTopology(TopologyDetails td) {
         String topId = td.getId();
         LOG.debug("Adding in Topology {}", topId);
-        SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
+        SchedulerAssignment assignment = cluster.getAssignmentById(topId);
         Set<Node> assignedNodes = new HashSet<>();
         if (assignment != null) {
             for (WorkerSlot ws : assignment.getSlots()) {
-                Node n = _nodeIdToNode.get(ws.getNodeId());
+                Node n = nodeIdToNode.get(ws.getNodeId());
                 assignedNodes.add(n);
             }
         }
-        _usedNodes += assignedNodes.size();
-        _topologyIdToNodes.put(topId, assignedNodes);
-        _tds.put(topId, td);
+        usedNodes += assignedNodes.size();
+        topologyIdToNodes.put(topId, assignedNodes);
+        tds.put(topId, td);
         if (td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES) != null) {
-            _isolated.add(topId);
+            isolated.add(topId);
         }
     }
 
@@ -66,10 +66,10 @@
     public boolean canAdd(TopologyDetails td) {
         //Only add topologies that are not sharing nodes with other topologies
         String topId = td.getId();
-        SchedulerAssignment assignment = _cluster.getAssignmentById(topId);
+        SchedulerAssignment assignment = cluster.getAssignmentById(topId);
         if (assignment != null) {
             for (WorkerSlot ws : assignment.getSlots()) {
-                Node n = _nodeIdToNode.get(ws.getNodeId());
+                Node n = nodeIdToNode.get(ws.getNodeId());
                 if (n.getRunningTopologies().size() > 1) {
                     return false;
                 }
@@ -80,18 +80,17 @@
 
     @Override
     public void scheduleAsNeeded(NodePool... lesserPools) {
-        for (String topId : _topologyIdToNodes.keySet()) {
-            TopologyDetails td = _tds.get(topId);
-            Set<Node> allNodes = _topologyIdToNodes.get(topId);
+        for (String topId : topologyIdToNodes.keySet()) {
+            TopologyDetails td = tds.get(topId);
+            Set<Node> allNodes = topologyIdToNodes.get(topId);
             Number nodesRequested = (Number) td.getConf().get(Config.TOPOLOGY_ISOLATED_MACHINES);
             Integer effectiveNodesRequested = null;
             if (nodesRequested != null) {
                 effectiveNodesRequested = Math.min(td.getExecutors().size(),
                                                    +nodesRequested.intValue());
             }
-            if (_cluster.needsScheduling(td) ||
-                (effectiveNodesRequested != null &&
-                 allNodes.size() != effectiveNodesRequested)) {
+            if (cluster.needsScheduling(td)
+                    || (effectiveNodesRequested != null && allNodes.size() != effectiveNodesRequested)) {
                 LOG.debug("Scheduling topology {}", topId);
                 int slotsToUse = 0;
                 if (effectiveNodesRequested == null) {
@@ -106,7 +105,7 @@
                 }
 
                 RoundRobinSlotScheduler slotSched =
-                    new RoundRobinSlotScheduler(td, slotsToUse, _cluster);
+                    new RoundRobinSlotScheduler(td, slotsToUse, cluster);
 
                 LOG.debug("Nodes sorted by free space {}", allNodes);
                 while (true) {
@@ -120,9 +119,9 @@
                     }
                 }
             }
-            Set<Node> found = _topologyIdToNodes.get(topId);
+            Set<Node> found = topologyIdToNodes.get(topId);
             int nc = found == null ? 0 : found.size();
-            _cluster.setStatus(topId, "Scheduled Isolated on " + nc + " Nodes");
+            cluster.setStatus(topId, "Scheduled Isolated on " + nc + " Nodes");
         }
     }
 
@@ -152,7 +151,7 @@
      * Get the nodes needed to schedule an isolated topology.
      * @param td the topology to be scheduled
      * @param allNodes the nodes already scheduled for this topology.
-     * This will be updated to include new nodes if needed.
+     *     This will be updated to include new nodes if needed.
      * @param lesserPools node pools we can steal nodes from
      * @return the number of additional slots that should be used for scheduling.
      */
@@ -163,39 +162,45 @@
         int nodesFromUsAvailable = nodesAvailable();
         int nodesFromOthersAvailable = NodePool.nodesAvailable(lesserPools);
 
-        int nodesUsed = _topologyIdToNodes.get(topId).size();
+        int nodesUsed = topologyIdToNodes.get(topId).size();
         int nodesNeeded = nodesRequested - nodesUsed;
-        LOG.debug("Nodes... requested {} used {} available from us {} " +
-                  "avail from other {} needed {}", nodesRequested,
-                  nodesUsed, nodesFromUsAvailable, nodesFromOthersAvailable,
-                  nodesNeeded);
-        if ((nodesNeeded - nodesFromUsAvailable) > (_maxNodes - _usedNodes)) {
-            _cluster.setStatus(topId, "Max Nodes(" + _maxNodes + ") for this user would be exceeded. "
-                                      + ((nodesNeeded - nodesFromUsAvailable) - (_maxNodes - _usedNodes))
-                                      + " more nodes needed to run topology.");
+        LOG.debug("Nodes... requested {} used {} available from us {} "
+                        + "avail from other {} needed {}",
+                nodesRequested,
+                nodesUsed,
+                nodesFromUsAvailable,
+                nodesFromOthersAvailable,
+                nodesNeeded);
+        if ((nodesNeeded - nodesFromUsAvailable) > (maxNodes - usedNodes)) {
+            cluster.setStatus(topId,
+                    "Max Nodes("
+                            + maxNodes
+                            + ") for this user would be exceeded. "
+                            + ((nodesNeeded - nodesFromUsAvailable) - (maxNodes - usedNodes))
+                            + " more nodes needed to run topology.");
             return 0;
         }
 
-        //In order to avoid going over _maxNodes I may need to steal from
+        //In order to avoid going over maxNodes I may need to steal from
        // myself even though other pools have free nodes. So figure out how
         // much each group should provide
-        int nodesNeededFromOthers = Math.min(Math.min(_maxNodes - _usedNodes,
+        int nodesNeededFromOthers = Math.min(Math.min(maxNodes - usedNodes,
                                                       nodesFromOthersAvailable), nodesNeeded);
         int nodesNeededFromUs = nodesNeeded - nodesNeededFromOthers;
         LOG.debug("Nodes... needed from us {} needed from others {}",
                   nodesNeededFromUs, nodesNeededFromOthers);
 
         if (nodesNeededFromUs > nodesFromUsAvailable) {
-            _cluster.setStatus(topId, "Not Enough Nodes Available to Schedule Topology");
+            cluster.setStatus(topId, "Not Enough Nodes Available to Schedule Topology");
             return 0;
         }
 
         //Get the nodes
         Collection<Node> found = NodePool.takeNodes(nodesNeededFromOthers, lesserPools);
-        _usedNodes += found.size();
+        usedNodes += found.size();
         allNodes.addAll(found);
         Collection<Node> foundMore = takeNodes(nodesNeededFromUs);
-        _usedNodes += foundMore.size();
+        usedNodes += foundMore.size();
         allNodes.addAll(foundMore);
 
         int totalTasks = td.getExecutors().size();
@@ -207,13 +212,20 @@
         if (slotsToUse <= 0) {
             // if # of workers requested is more than we currently have
             if (origRequest > slotsUsed) {
-                _cluster.setStatus(topId, "Running with fewer slots than requested " + slotsUsed + "/" +
-                                          origRequest + " on " + allNodes.size() + " node(s) with " + (slotsUsed + slotsFree) +
-                                          " total slots");
+                cluster.setStatus(topId,
+                        "Running with fewer slots than requested "
+                                + slotsUsed
+                                + "/"
+                                + origRequest
+                                + " on "
+                                + allNodes.size()
+                                + " node(s) with "
+                                + (slotsUsed + slotsFree)
+                                + " total slots");
             } else {
                 // if # of workers requested is less than we took
                 // then we know some workers we track died, since we have more workers than we are supposed to have
-                _cluster.setStatus(topId, "Node has partially crashed, if this situation persists rebalance the topology.");
+                cluster.setStatus(topId, "Node has partially crashed, if this situation persists rebalance the topology.");
             }
         }
         return slotsToUse;
@@ -223,7 +235,7 @@
      * Get the nodes needed to schedule a non-isolated topology.
      * @param td the topology to be scheduled
      * @param allNodes the nodes already scheduled for this topology.
-     * This will be updated to include new nodes if needed.
+     *     This will be updated to include new nodes if needed.
      * @param lesserPools node pools we can steal nodes from
      * @return the number of additional slots that should be used for scheduling.
      */
@@ -245,21 +257,25 @@
         LOG.debug("Slots... requested {} used {} free {} available {} to be used {}",
                   slotsRequested, slotsUsed, slotsFree, slotsAvailable, slotsToUse);
         if (slotsToUse <= 0) {
-            _cluster.setStatus(topId, "Not Enough Slots Available to Schedule Topology");
+            cluster.setStatus(topId, "Not Enough Slots Available to Schedule Topology");
             return 0;
         }
         int slotsNeeded = slotsToUse - slotsFree;
         int numNewNodes = NodePool.getNodeCountIfSlotsWereTaken(slotsNeeded, lesserPools);
         LOG.debug("Nodes... new {} used {} max {}",
-                  numNewNodes, _usedNodes, _maxNodes);
-        if ((numNewNodes + _usedNodes) > _maxNodes) {
-            _cluster.setStatus(topId, "Max Nodes(" + _maxNodes + ") for this user would be exceeded. " +
-                                      (numNewNodes - (_maxNodes - _usedNodes)) + " more nodes needed to run topology.");
+                  numNewNodes, usedNodes, maxNodes);
+        if ((numNewNodes + usedNodes) > maxNodes) {
+            cluster.setStatus(topId,
+                    "Max Nodes("
+                            + maxNodes
+                            + ") for this user would be exceeded. "
+                            + (numNewNodes - (maxNodes - usedNodes))
+                            + " more nodes needed to run topology.");
             return 0;
         }
 
         Collection<Node> found = NodePool.takeNodesBySlot(slotsNeeded, lesserPools);
-        _usedNodes += found.size();
+        usedNodes += found.size();
         allNodes.addAll(found);
         return slotsToUse;
     }
@@ -268,8 +284,8 @@
     public Collection<Node> takeNodes(int nodesNeeded) {
         LOG.debug("Taking {} from {}", nodesNeeded, this);
         HashSet<Node> ret = new HashSet<>();
-        for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
-            if (!_isolated.contains(entry.getKey())) {
+        for (Entry<String, Set<Node>> entry : topologyIdToNodes.entrySet()) {
+            if (!isolated.contains(entry.getKey())) {
                 Iterator<Node> it = entry.getValue().iterator();
                 while (it.hasNext()) {
                     if (nodesNeeded <= 0) {
@@ -277,10 +293,10 @@
                     }
                     Node n = it.next();
                     it.remove();
-                    n.freeAllSlots(_cluster);
+                    n.freeAllSlots(cluster);
                     ret.add(n);
                     nodesNeeded--;
-                    _usedNodes--;
+                    usedNodes--;
                 }
             }
         }
@@ -290,8 +306,8 @@
     @Override
     public int nodesAvailable() {
         int total = 0;
-        for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
-            if (!_isolated.contains(entry.getKey())) {
+        for (Entry<String, Set<Node>> entry : topologyIdToNodes.entrySet()) {
+            if (!isolated.contains(entry.getKey())) {
                 total += entry.getValue().size();
             }
         }
@@ -301,8 +317,8 @@
     @Override
     public int slotsAvailable() {
         int total = 0;
-        for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
-            if (!_isolated.contains(entry.getKey())) {
+        for (Entry<String, Set<Node>> entry : topologyIdToNodes.entrySet()) {
+            if (!isolated.contains(entry.getKey())) {
                 total += Node.countTotalSlotsAlive(entry.getValue());
             }
         }
@@ -312,15 +328,15 @@
     @Override
     public Collection<Node> takeNodesBySlots(int slotsNeeded) {
         HashSet<Node> ret = new HashSet<>();
-        for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
-            if (!_isolated.contains(entry.getKey())) {
+        for (Entry<String, Set<Node>> entry : topologyIdToNodes.entrySet()) {
+            if (!isolated.contains(entry.getKey())) {
                 Iterator<Node> it = entry.getValue().iterator();
                 while (it.hasNext()) {
                     Node n = it.next();
                     if (n.isAlive()) {
                         it.remove();
-                        _usedNodes--;
-                        n.freeAllSlots(_cluster);
+                        usedNodes--;
+                        n.freeAllSlots(cluster);
                         ret.add(n);
                         slotsNeeded -= n.totalSlots();
                         if (slotsNeeded <= 0) {
@@ -337,8 +353,8 @@
     public NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slotsNeeded) {
         int nodesFound = 0;
         int slotsFound = 0;
-        for (Entry<String, Set<Node>> entry : _topologyIdToNodes.entrySet()) {
-            if (!_isolated.contains(entry.getKey())) {
+        for (Entry<String, Set<Node>> entry : topologyIdToNodes.entrySet()) {
+            if (!isolated.contains(entry.getKey())) {
                 for (Node n : entry.getValue()) {
                     if (n.isAlive()) {
                         nodesFound++;
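
The budget arithmetic above decides how the nodes an isolated topology still needs are split between pools: the share stolen from lesser pools is clamped to the user's remaining budget (maxNodes - usedNodes), to what those pools actually have, and to what is needed; the remainder must come from this pool. A worked example with illustrative numbers:

    // Illustrative numbers for the clamping used above.
    public class NodeBudgetExample {
        public static void main(String[] args) {
            int maxNodes = 10, usedNodes = 7;      // 3 nodes left in the user's budget
            int nodesFromOthersAvailable = 5;
            int nodesNeeded = 4;

            int fromOthers = Math.min(Math.min(maxNodes - usedNodes, nodesFromOthersAvailable),
                                      nodesNeeded); // min(3, 5, 4) = 3
            int fromUs = nodesNeeded - fromOthers;   // 1 node must come from this pool
            System.out.println(fromOthers + " from other pools, " + fromUs + " from this pool");
        }
    }
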
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/Node.java b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/Node.java
index 89fe462..0c2ba56 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/Node.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/Node.java
@@ -42,17 +42,17 @@
         }
     };
     private static final Logger LOG = LoggerFactory.getLogger(Node.class);
-    private final String _nodeId;
-    private Map<String, Set<WorkerSlot>> _topIdToUsedSlots = new HashMap<>();
-    private Set<WorkerSlot> _freeSlots = new HashSet<>();
-    private boolean _isAlive;
+    private final String nodeId;
+    private Map<String, Set<WorkerSlot>> topIdToUsedSlots = new HashMap<>();
+    private Set<WorkerSlot> freeSlots = new HashSet<>();
+    private boolean isAlive;
 
     public Node(String nodeId, Set<Integer> allPorts, boolean isAlive) {
-        _nodeId = nodeId;
-        _isAlive = isAlive;
-        if (_isAlive && allPorts != null) {
+        this.nodeId = nodeId;
+        this.isAlive = isAlive;
+        if (this.isAlive && allPorts != null) {
             for (int port : allPorts) {
-                _freeSlots.add(new WorkerSlot(_nodeId, port));
+                freeSlots.add(new WorkerSlot(this.nodeId, port));
             }
         }
     }
@@ -119,8 +119,11 @@
                     node.addOrphanedSlot(ws);
                 }
                 if (node.assignInternal(ws, topId, true)) {
-                    LOG.warn("Bad scheduling state for topology [" + topId + "], the slot " +
-                             ws + " assigned to multiple workers, un-assigning everything...");
+                    LOG.warn("Bad scheduling state for topology ["
+                            + topId
+                            + "], the slot "
+                            + ws
+                            + " assigned to multiple workers, un-assigning everything...");
                     node.free(ws, cluster, true);
                 }
             }
@@ -130,95 +133,110 @@
     }
 
     public String getId() {
-        return _nodeId;
+        return nodeId;
     }
 
     public boolean isAlive() {
-        return _isAlive;
+        return isAlive;
     }
 
     /**
+     * Get running topologies.
      * @return a collection of the topology ids currently running on this node
      */
     public Collection<String> getRunningTopologies() {
-        return _topIdToUsedSlots.keySet();
+        return topIdToUsedSlots.keySet();
     }
 
     public boolean isTotallyFree() {
-        return _topIdToUsedSlots.isEmpty();
+        return topIdToUsedSlots.isEmpty();
     }
 
     public int totalSlotsFree() {
-        return _freeSlots.size();
+        return freeSlots.size();
     }
 
     public int totalSlotsUsed() {
         int total = 0;
-        for (Set<WorkerSlot> slots : _topIdToUsedSlots.values()) {
+        for (Set<WorkerSlot> slots : topIdToUsedSlots.values()) {
             total += slots.size();
         }
         return total;
     }
 
+    public int totalSlotsUsed(String topId) {
+        int total = 0;
+        Set<WorkerSlot> slots = topIdToUsedSlots.get(topId);
+        if (slots != null) {
+            total = slots.size();
+        }
+        return total;
+    }
+
     public int totalSlots() {
         return totalSlotsFree() + totalSlotsUsed();
     }
 
-    public int totalSlotsUsed(String topId) {
-        int total = 0;
-        Set<WorkerSlot> slots = _topIdToUsedSlots.get(topId);
-        if (slots != null) {
-            total = slots.size();
-        }
-        return total;
-    }
-
     private void validateSlot(WorkerSlot ws) {
-        if (!_nodeId.equals(ws.getNodeId())) {
-            throw new IllegalArgumentException(
-                "Trying to add a slot to the wrong node " + ws +
-                " is not a part of " + _nodeId);
+        if (!nodeId.equals(ws.getNodeId())) {
+            throw new IllegalArgumentException("Trying to add a slot to the wrong node "
+                    + ws
+                    + " is not a part of "
+                    + nodeId);
         }
     }
 
     private void addOrphanedSlot(WorkerSlot ws) {
-        if (_isAlive) {
-            throw new IllegalArgumentException("Orphaned Slots " +
-                                               "only are allowed on dead nodes.");
+        if (isAlive) {
+            throw new IllegalArgumentException("Orphaned Slots only are allowed on dead nodes.");
         }
         validateSlot(ws);
-        if (_freeSlots.contains(ws)) {
+        if (freeSlots.contains(ws)) {
             return;
         }
-        for (Set<WorkerSlot> used : _topIdToUsedSlots.values()) {
+        for (Set<WorkerSlot> used : topIdToUsedSlots.values()) {
             if (used.contains(ws)) {
                 return;
             }
         }
-        _freeSlots.add(ws);
+        freeSlots.add(ws);
     }
 
     boolean assignInternal(WorkerSlot ws, String topId, boolean dontThrow) {
         validateSlot(ws);
-        if (!_freeSlots.remove(ws)) {
-            for (Entry<String, Set<WorkerSlot>> topologySetEntry : _topIdToUsedSlots.entrySet()) {
+        if (!freeSlots.remove(ws)) {
+            for (Entry<String, Set<WorkerSlot>> topologySetEntry : topIdToUsedSlots.entrySet()) {
                 if (topologySetEntry.getValue().contains(ws)) {
                     if (dontThrow) {
-                        LOG.warn("Worker slot [" + ws + "] can't be assigned to " + topId +
-                                 ". Its already assigned to " + topologySetEntry.getKey() + ".");
+                        LOG.warn("Worker slot ["
+                                + ws
+                                + "] can't be assigned to "
+                                + topId
+                                + ". Its already assigned to "
+                                + topologySetEntry.getKey()
+                                + ".");
                         return true;
                     }
-                    throw new IllegalStateException("Worker slot [" + ws + "] can't be assigned to "
-                                                    + topId + ". Its already assigned to " + topologySetEntry.getKey() + ".");
+                    throw new IllegalStateException("Worker slot ["
+                            + ws
+                            + "] can't be assigned to "
+                            + topId
+                            + ". Its already assigned to "
+                            + topologySetEntry.getKey()
+                            + ".");
                 }
             }
-            LOG.warn("Adding Worker slot [" + ws + "] that was not reported in the supervisor heartbeats," +
-                     " but the worker is already running for topology " + topId + ".");
+            LOG.warn("Adding Worker slot ["
+                    + ws
+                    + "] that was not reported in the supervisor heartbeats,"
+                    + " but the worker is already running for topology "
+                    + topId
+                    + ".");
         }
-        Set<WorkerSlot> usedSlots = _topIdToUsedSlots.get(topId);
+        Set<WorkerSlot> usedSlots = topIdToUsedSlots.get(topId);
         if (usedSlots == null) {
             usedSlots = new HashSet<>();
-            _topIdToUsedSlots.put(topId, usedSlots);
+            topIdToUsedSlots.put(topId, usedSlots);
         }
         usedSlots.add(ws);
         return false;
@@ -229,32 +247,34 @@
      * @param cluster the cluster to be updated
      */
     public void freeAllSlots(Cluster cluster) {
-        if (!_isAlive) {
-            LOG.warn("Freeing all slots on a dead node {} ", _nodeId);
+        if (!isAlive) {
+            LOG.warn("Freeing all slots on a dead node {} ", nodeId);
         }
-        for (Entry<String, Set<WorkerSlot>> entry : _topIdToUsedSlots.entrySet()) {
+        for (Entry<String, Set<WorkerSlot>> entry : topIdToUsedSlots.entrySet()) {
             cluster.freeSlots(entry.getValue());
-            if (_isAlive) {
-                _freeSlots.addAll(entry.getValue());
+            if (isAlive) {
+                freeSlots.addAll(entry.getValue());
             }
         }
-        _topIdToUsedSlots = new HashMap<>();
+        topIdToUsedSlots = new HashMap<>();
     }
 
     /**
-     * Frees a single slot in this node
+     * Frees a single slot in this node.
      * @param ws the slot to free
      * @param cluster the cluster to update
      */
     public void free(WorkerSlot ws, Cluster cluster, boolean forceFree) {
-        if (_freeSlots.contains(ws)) return;
+        if (freeSlots.contains(ws)) {
+            return;
+        }
         boolean wasFound = false;
-        for (Entry<String, Set<WorkerSlot>> entry : _topIdToUsedSlots.entrySet()) {
+        for (Entry<String, Set<WorkerSlot>> entry : topIdToUsedSlots.entrySet()) {
             Set<WorkerSlot> slots = entry.getValue();
             if (slots.remove(ws)) {
                 cluster.freeSlot(ws);
-                if (_isAlive) {
-                    _freeSlots.add(ws);
+                if (isAlive) {
+                    freeSlots.add(ws);
                 }
                 wasFound = true;
             }
@@ -263,10 +283,11 @@
             if (forceFree) {
                 LOG.info("Forcefully freeing the " + ws);
                 cluster.freeSlot(ws);
-                _freeSlots.add(ws);
+                freeSlots.add(ws);
             } else {
-                throw new IllegalArgumentException("Tried to free a slot that was not" +
-                                                   " part of this node " + _nodeId);
+                throw new IllegalArgumentException("Tried to free a slot that was not"
+                        + " part of this node "
+                        + nodeId);
             }
         }
     }
@@ -277,15 +298,17 @@
      * @param cluster the cluster to update
      */
     public void freeTopology(String topId, Cluster cluster) {
-        Set<WorkerSlot> slots = _topIdToUsedSlots.get(topId);
-        if (slots == null || slots.isEmpty()) return;
+        Set<WorkerSlot> slots = topIdToUsedSlots.get(topId);
+        if (slots == null || slots.isEmpty()) {
+            return;
+        }
         for (WorkerSlot ws : slots) {
             cluster.freeSlot(ws);
-            if (_isAlive) {
-                _freeSlots.add(ws);
+            if (isAlive) {
+                freeSlots.add(ws);
             }
         }
-        _topIdToUsedSlots.remove(topId);
+        topIdToUsedSlots.remove(topId);
     }
 
     /**
@@ -297,16 +320,16 @@
      */
     public void assign(String topId, Collection<ExecutorDetails> executors,
                        Cluster cluster) {
-        if (!_isAlive) {
-            throw new IllegalStateException("Trying to adding to a dead node " + _nodeId);
+        if (!isAlive) {
+            throw new IllegalStateException("Trying to adding to a dead node " + nodeId);
         }
-        if (_freeSlots.isEmpty()) {
-            throw new IllegalStateException("Trying to assign to a full node " + _nodeId);
+        if (freeSlots.isEmpty()) {
+            throw new IllegalStateException("Trying to assign to a full node " + nodeId);
         }
         if (executors.size() == 0) {
-            LOG.warn("Trying to assign nothing from " + topId + " to " + _nodeId + " (Ignored)");
+            LOG.warn("Trying to assign nothing from " + topId + " to " + nodeId + " (Ignored)");
         } else {
-            WorkerSlot slot = _freeSlots.iterator().next();
+            WorkerSlot slot = freeSlots.iterator().next();
             cluster.assign(slot, topId, executors);
             assignInternal(slot, topId, false);
         }
@@ -314,16 +337,16 @@
 
     @Override
     public boolean equals(Object other) {
-        return other instanceof Node && _nodeId.equals(((Node) other)._nodeId);
+        return other instanceof Node && nodeId.equals(((Node) other).nodeId);
     }
 
     @Override
     public int hashCode() {
-        return _nodeId.hashCode();
+        return nodeId.hashCode();
     }
 
     @Override
     public String toString() {
-        return "Node: " + _nodeId;
+        return "Node: " + nodeId;
     }
 }
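
assignInternal keeps the explicit get/null-check/put sequence when creating the per-topology slot set; since the surrounding code already uses Java 8 lambdas, Map.computeIfAbsent is an equivalent one-line idiom. A sketch of that alternative (not what this patch does; names are illustrative):

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    // Illustrative: computeIfAbsent collapses the get-then-put-on-null pattern.
    class SlotIndex {
        private final Map<String, Set<String>> topIdToUsedSlots = new HashMap<>();

        void record(String topId, String slot) {
            // Creates the set on first use, then adds to it.
            topIdToUsedSlots.computeIfAbsent(topId, k -> new HashSet<>()).add(slot);
        }
    }
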
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/NodePool.java b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/NodePool.java
index 21ffbf5..477f243 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/NodePool.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/multitenant/NodePool.java
@@ -35,8 +35,8 @@
  */
 public abstract class NodePool {
     private static final Logger LOG = LoggerFactory.getLogger(NodePool.class);
-    protected Cluster _cluster;
-    protected Map<String, Node> _nodeIdToNode;
+    protected Cluster cluster;
+    protected Map<String, Node> nodeIdToNode;
 
     public static int slotsAvailable(NodePool[] pools) {
         int slotsAvailable = 0;
@@ -46,6 +46,12 @@
         return slotsAvailable;
     }
 
+    /**
+     * Get the number of available slots.
+     * @return the number of slots that are available to be taken
+     */
+    public abstract int slotsAvailable();
+
     public static int nodesAvailable(NodePool[] pools) {
         int nodesAvailable = 0;
         for (NodePool pool : pools) {
@@ -54,6 +60,12 @@
         return nodesAvailable;
     }
 
+    /**
+     * Get the number of available nodes.
+     * @return the number of nodes that are available to be taken
+     */
+    public abstract int nodesAvailable();
+
     public static Collection<Node> takeNodesBySlot(int slotsNeeded, NodePool[] pools) {
         LOG.debug("Trying to grab {} free slots from {}", slotsNeeded, pools);
         HashSet<Node> ret = new HashSet<>();
@@ -89,8 +101,8 @@
         int total = 0;
         for (NodePool pool : pools) {
             NodeAndSlotCounts ns = pool.getNodeAndSlotCountIfSlotsWereTaken(slots);
-            total += ns._nodes;
-            slots -= ns._slots;
+            total += ns.nodes;
+            slots -= ns.slots;
             LOG.debug("Found {} nodes so far {} more slots needed", total, slots);
             if (slots <= 0) {
                 break;
@@ -105,55 +117,46 @@
      * @param nodeIdToNode the mapping of node id to nodes
      */
     public void init(Cluster cluster, Map<String, Node> nodeIdToNode) {
-        _cluster = cluster;
-        _nodeIdToNode = nodeIdToNode;
+        this.cluster = cluster;
+        this.nodeIdToNode = nodeIdToNode;
     }
 
     /**
-     * Add a topology to the pool
-     * @param td the topology to add.
+     * Add a topology to the pool.
+     * @param td the topology to add
      */
     public abstract void addTopology(TopologyDetails td);
 
     /**
-     * Check if this topology can be added to this pool
+     * Check if this topology can be added to this pool.
      * @param td the topology
      * @return true if it can else false
      */
     public abstract boolean canAdd(TopologyDetails td);
 
     /**
-     * @return the number of nodes that are available to be taken
-     */
-    public abstract int slotsAvailable();
-
-    /**
-     * Take nodes from this pool that can fulfill possibly up to the
-     * slotsNeeded
+     * Take nodes from this pool that can fulfill possibly up to the slotsNeeded.
      * @param slotsNeeded the number of slots that are needed.
      * @return a Collection of nodes with the removed nodes in it.
-     * This may be empty, but should not be null.
+     *     This may be empty, but should not be null.
      */
     public abstract Collection<Node> takeNodesBySlots(int slotsNeeded);
 
     /**
-     * Get the number of nodes and slots this would provide to get the slots needed
+     * Get the number of nodes and slots this would provide to get the slots needed.
      * @param slots the number of slots needed
      * @return the number of nodes and slots that would be returned.
      */
     public abstract NodeAndSlotCounts getNodeAndSlotCountIfSlotsWereTaken(int slots);
 
     /**
-     * @return the number of nodes that are available to be taken
-     */
-    public abstract int nodesAvailable();
-
-    /**
-     * Take up to nodesNeeded from this pool
+     * Take up to nodesNeeded from this pool.
      * @param nodesNeeded the number of nodes that are needed.
      * @return a Collection of nodes with the removed nodes in it.
-     * This may be empty, but should not be null.
+     *     This may be empty, but should not be null.
      */
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
+    // simply suppress until https://github.com/checkstyle/checkstyle/issues/3770 is resolved
     public abstract Collection<Node> takeNodes(int nodesNeeded);
 
     /**
@@ -163,12 +166,12 @@
     public abstract void scheduleAsNeeded(NodePool... lesserPools);
 
     public static class NodeAndSlotCounts {
-        public final int _nodes;
-        public final int _slots;
+        public final int nodes;
+        public final int slots;
 
         public NodeAndSlotCounts(int nodes, int slots) {
-            _nodes = nodes;
-            _slots = slots;
+            this.nodes = nodes;
+            this.slots = slots;
         }
     }
 
@@ -177,74 +180,74 @@
      * component spreading among different hosts.
      */
     public static class RoundRobinSlotScheduler {
-        private Map<String, Set<String>> _nodeToComps;
-        private HashMap<String, List<ExecutorDetails>> _spreadToSchedule;
-        private LinkedList<Set<ExecutorDetails>> _slots;
-        private Set<ExecutorDetails> _lastSlot;
-        private Cluster _cluster;
-        private String _topId;
+        private Map<String, Set<String>> nodeToComps;
+        private HashMap<String, List<ExecutorDetails>> spreadToSchedule;
+        private LinkedList<Set<ExecutorDetails>> slots;
+        private Set<ExecutorDetails> lastSlot;
+        private Cluster cluster;
+        private String topId;
 
         /**
-         * Create a new scheduler for a given topology
+         * Create a new scheduler for a given topology.
          * @param td the topology to schedule
          * @param slotsToUse the number of slots to use for the executors left to
-         * schedule.
+         *     schedule.
          * @param cluster the cluster to schedule this on.
          */
         public RoundRobinSlotScheduler(TopologyDetails td, int slotsToUse,
                                        Cluster cluster) {
-            _topId = td.getId();
-            _cluster = cluster;
+            topId = td.getId();
+            this.cluster = cluster;
 
             Map<ExecutorDetails, String> execToComp = td.getExecutorToComponent();
-            SchedulerAssignment assignment = _cluster.getAssignmentById(_topId);
-            _nodeToComps = new HashMap<>();
+            SchedulerAssignment assignment = this.cluster.getAssignmentById(topId);
+            nodeToComps = new HashMap<>();
 
             if (assignment != null) {
                 Map<ExecutorDetails, WorkerSlot> execToSlot = assignment.getExecutorToSlot();
 
                 for (Entry<ExecutorDetails, WorkerSlot> entry : execToSlot.entrySet()) {
                     String nodeId = entry.getValue().getNodeId();
-                    Set<String> comps = _nodeToComps.get(nodeId);
+                    Set<String> comps = nodeToComps.get(nodeId);
                     if (comps == null) {
                         comps = new HashSet<>();
-                        _nodeToComps.put(nodeId, comps);
+                        nodeToComps.put(nodeId, comps);
                     }
                     comps.add(execToComp.get(entry.getKey()));
                 }
             }
 
-            _spreadToSchedule = new HashMap<>();
+            spreadToSchedule = new HashMap<>();
             List<String> spreadComps = (List<String>) td.getConf().get(Config.TOPOLOGY_SPREAD_COMPONENTS);
             if (spreadComps != null) {
                 for (String comp : spreadComps) {
-                    _spreadToSchedule.put(comp, new ArrayList<ExecutorDetails>());
+                    spreadToSchedule.put(comp, new ArrayList<ExecutorDetails>());
                 }
             }
 
-            _slots = new LinkedList<>();
+            slots = new LinkedList<>();
             for (int i = 0; i < slotsToUse; i++) {
-                _slots.add(new HashSet<ExecutorDetails>());
+                slots.add(new HashSet<ExecutorDetails>());
             }
 
             int at = 0;
-            for (Entry<String, List<ExecutorDetails>> entry : _cluster.getNeedsSchedulingComponentToExecutors(td).entrySet()) {
+            for (Entry<String, List<ExecutorDetails>> entry : this.cluster.getNeedsSchedulingComponentToExecutors(td).entrySet()) {
                 LOG.debug("Scheduling for {}", entry.getKey());
-                if (_spreadToSchedule.containsKey(entry.getKey())) {
+                if (spreadToSchedule.containsKey(entry.getKey())) {
                     LOG.debug("Saving {} for spread...", entry.getKey());
-                    _spreadToSchedule.get(entry.getKey()).addAll(entry.getValue());
+                    spreadToSchedule.get(entry.getKey()).addAll(entry.getValue());
                 } else {
                     for (ExecutorDetails ed : entry.getValue()) {
                         LOG.debug("Assigning {} {} to slot {}", entry.getKey(), ed, at);
-                        _slots.get(at).add(ed);
+                        slots.get(at).add(ed);
                         at++;
-                        if (at >= _slots.size()) {
+                        if (at >= slots.size()) {
                             at = 0;
                         }
                     }
                 }
             }
-            _lastSlot = _slots.get(_slots.size() - 1);
+            lastSlot = slots.get(slots.size() - 1);
         }
 
         /**
@@ -253,25 +256,25 @@
          * @return true if there are more slots to assign else false.
          */
         public boolean assignSlotTo(Node n) {
-            if (_slots.isEmpty()) {
+            if (slots.isEmpty()) {
                 return false;
             }
-            Set<ExecutorDetails> slot = _slots.pop();
-            if (slot == _lastSlot) {
+            Set<ExecutorDetails> slot = slots.pop();
+            if (slot == lastSlot) {
                //The last slot: fill it up with any remaining spread executors
-                for (Entry<String, List<ExecutorDetails>> entry : _spreadToSchedule.entrySet()) {
+                for (Entry<String, List<ExecutorDetails>> entry : spreadToSchedule.entrySet()) {
                     if (entry.getValue().size() > 0) {
                         slot.addAll(entry.getValue());
                     }
                 }
             } else {
                 String nodeId = n.getId();
-                Set<String> nodeComps = _nodeToComps.get(nodeId);
+                Set<String> nodeComps = nodeToComps.get(nodeId);
                 if (nodeComps == null) {
                     nodeComps = new HashSet<>();
-                    _nodeToComps.put(nodeId, nodeComps);
+                    nodeToComps.put(nodeId, nodeComps);
                 }
-                for (Entry<String, List<ExecutorDetails>> entry : _spreadToSchedule.entrySet()) {
+                for (Entry<String, List<ExecutorDetails>> entry : spreadToSchedule.entrySet()) {
                     if (entry.getValue().size() > 0) {
                         String comp = entry.getKey();
                         if (!nodeComps.contains(comp)) {
@@ -281,8 +284,8 @@
                     }
                 }
             }
-            n.assign(_topId, slot, _cluster);
-            return !_slots.isEmpty();
+            n.assign(topId, slot, cluster);
+            return !slots.isEmpty();
         }
     }
 }
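
The constructor above deals executors of non-spread components into the requested number of slots with a wrapping cursor, holding spread components back to top up the last slot. A minimal, self-contained sketch of just the dealing step (plain strings stand in for ExecutorDetails; spread handling and cluster state are omitted):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class RoundRobinSketch {
    /** Deal executors into slotsToUse buckets with a wrapping cursor. */
    public static List<Set<String>> pack(List<String> executors, int slotsToUse) {
        List<Set<String>> slots = new ArrayList<>();
        for (int i = 0; i < slotsToUse; i++) {
            slots.add(new HashSet<>());
        }
        int at = 0;
        for (String exec : executors) {
            slots.get(at).add(exec);
            at++;
            if (at >= slots.size()) { // same wrap-around check as the scheduler above
                at = 0;
            }
        }
        return slots;
    }

    public static void main(String[] args) {
        // Two slots: {e1, e3, e5} and {e2, e4} (set iteration order may vary).
        System.out.println(pack(Arrays.asList("e1", "e2", "e3", "e4", "e5"), 2));
    }
}
```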
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/RAS_Node.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/RasNode.java
similarity index 97%
rename from storm-server/src/main/java/org/apache/storm/scheduler/resource/RAS_Node.java
rename to storm-server/src/main/java/org/apache/storm/scheduler/resource/RasNode.java
index e1cd1cf..9d12428 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/RAS_Node.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/RasNode.java
@@ -19,6 +19,7 @@
 package org.apache.storm.scheduler.resource;
 
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
@@ -38,8 +39,8 @@
 /**
  * Represents a single node in the cluster.
  */
-public class RAS_Node {
-    private static final Logger LOG = LoggerFactory.getLogger(RAS_Node.class);
+public class RasNode {
+    private static final Logger LOG = LoggerFactory.getLogger(RasNode.class);
     private final String nodeId;
     private final Cluster cluster;
     private final Set<WorkerSlot> originallyFreeSlots;
@@ -62,7 +63,7 @@
      * @param workerIdToWorker the mapping of slots already assigned to this node.
      * @param assignmentMap the mapping of executors already assigned to this node.
      */
-    public RAS_Node(
+    public RasNode(
         String nodeId,
         SupervisorDetails sup,
         Cluster cluster,
@@ -163,11 +164,11 @@
      * @return the slots currently assigned to that topology on this node.
      */
     public Collection<WorkerSlot> getUsedSlots(String topId) {
-        Collection<WorkerSlot> ret = null;
         if (topIdToUsedSlots.get(topId) != null) {
-            ret = workerIdsToWorkers(topIdToUsedSlots.get(topId).keySet());
+            return workerIdsToWorkers(topIdToUsedSlots.get(topId).keySet());
+        } else {
+            return Collections.emptySet();
         }
-        return ret;
     }
 
     public boolean isAlive() {
@@ -390,8 +391,8 @@
 
     @Override
     public boolean equals(Object other) {
-        if (other instanceof RAS_Node) {
-            return nodeId.equals(((RAS_Node) other).nodeId);
+        if (other instanceof RasNode) {
+            return nodeId.equals(((RasNode) other).nodeId);
         }
         return false;
     }
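
The getUsedSlots(topId) change above swaps a possibly-null return for Collections.emptySet(). That matters because BaseResourceAwareStrategy (further down in this diff) calls contains() on the result when one-executor-per-worker is enabled, which would have thrown a NullPointerException for a topology with no slots on the node. A reduced illustration of the pattern, with a plain map standing in for topIdToUsedSlots:

```java
import java.util.Collection;
import java.util.Collections;
import java.util.Map;

class EmptyOverNull {
    static Collection<String> usedSlots(Map<String, Collection<String>> topIdToSlots, String topId) {
        Collection<String> slots = topIdToSlots.get(topId);
        // Returning an empty set instead of null lets callers iterate or call
        // contains() with no null guard.
        return slots != null ? slots : Collections.emptySet();
    }
}
```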
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/RAS_Nodes.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/RasNodes.java
similarity index 89%
rename from storm-server/src/main/java/org/apache/storm/scheduler/resource/RAS_Nodes.java
rename to storm-server/src/main/java/org/apache/storm/scheduler/resource/RasNodes.java
index 584934b..9ddbcf4b 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/RAS_Nodes.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/RasNodes.java
@@ -30,19 +30,19 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class RAS_Nodes {
+public class RasNodes {
 
-    private static final Logger LOG = LoggerFactory.getLogger(RAS_Nodes.class);
-    private Map<String, RAS_Node> nodeMap;
+    private static final Logger LOG = LoggerFactory.getLogger(RasNodes.class);
+    private Map<String, RasNode> nodeMap;
 
-    public RAS_Nodes(Cluster cluster) {
+    public RasNodes(Cluster cluster) {
         this.nodeMap = getAllNodesFrom(cluster);
     }
 
-    public static Map<String, RAS_Node> getAllNodesFrom(Cluster cluster) {
+    public static Map<String, RasNode> getAllNodesFrom(Cluster cluster) {
 
         //A map of node ids to node objects
-        Map<String, RAS_Node> nodeIdToNode = new HashMap<>();
+        Map<String, RasNode> nodeIdToNode = new HashMap<>();
         //A map of assignments organized by node with the following format:
         //{nodeId -> {topologyId -> {workerId -> {execs}}}}
         Map<String, Map<String, Map<String, Collection<ExecutorDetails>>>> assignmentRelationshipMap = new HashMap<>();
@@ -55,7 +55,6 @@
                 assignment.getSlotToExecutors().entrySet()) {
                 WorkerSlot slot = entry.getKey();
                 String nodeId = slot.getNodeId();
-                Collection<ExecutorDetails> execs = entry.getValue();
                 if (!assignmentRelationshipMap.containsKey(nodeId)) {
                     assignmentRelationshipMap.put(
                         nodeId, new HashMap<String, Map<String, Collection<ExecutorDetails>>>());
@@ -73,6 +72,7 @@
                         .get(topId)
                         .put(slot.getId(), new LinkedList<ExecutorDetails>());
                 }
+                Collection<ExecutorDetails> execs = entry.getValue();
                 assignmentRelationshipMap.get(nodeId).get(topId).get(slot.getId()).addAll(execs);
             }
         }
@@ -90,7 +90,7 @@
             }
             nodeIdToNode.put(
                 sup.getId(),
-                new RAS_Node(
+                new RasNode(
                     sup.getId(),
                     sup,
                     cluster,
@@ -109,7 +109,7 @@
                     nodeId,
                     assignments);
                 nodeIdToNode.put(
-                    nodeId, new RAS_Node(nodeId, null, cluster, workerIdToWorker.get(nodeId), assignments));
+                    nodeId, new RasNode(nodeId, null, cluster, workerIdToWorker.get(nodeId), assignments));
             }
         }
         return nodeIdToNode;
@@ -118,7 +118,7 @@
     /**
      * get node object from nodeId.
      */
-    public RAS_Node getNodeById(String nodeId) {
+    public RasNode getNodeById(String nodeId) {
         return this.nodeMap.get(nodeId);
     }
 
@@ -128,7 +128,7 @@
      * @param workerSlots the slots to free
      */
     public void freeSlots(Collection<WorkerSlot> workerSlots) {
-        for (RAS_Node node : nodeMap.values()) {
+        for (RasNode node : nodeMap.values()) {
             for (WorkerSlot ws : node.getUsedSlots()) {
                 if (workerSlots.contains(ws)) {
                     LOG.debug("freeing ws {} on node {}", ws, node);
@@ -138,14 +138,14 @@
         }
     }
 
-    public Collection<RAS_Node> getNodes() {
+    public Collection<RasNode> getNodes() {
         return this.nodeMap.values();
     }
 
     @Override
     public String toString() {
         StringBuilder ret = new StringBuilder();
-        for (RAS_Node node : nodeMap.values()) {
+        for (RasNode node : nodeMap.values()) {
             ret.append(node).append("\n");
         }
         return ret.toString();
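
The only non-rename edit in getAllNodesFrom is moving the execs declaration next to its use (presumably a checkstyle variable-declaration-usage-distance fix); the {nodeId -> {topologyId -> {workerId -> execs}}} population itself is unchanged. For reference, the same nested-map shape can be built with computeIfAbsent instead of the containsKey/put chains; this is an illustration only, not a change this patch makes:

```java
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

class NestedMapSketch {
    static void record(Map<String, Map<String, Map<String, Collection<String>>>> byNode,
                       String nodeId, String topoId, String workerId,
                       Collection<String> execs) {
        byNode.computeIfAbsent(nodeId, k -> new HashMap<>())
              .computeIfAbsent(topoId, k -> new HashMap<>())
              .computeIfAbsent(workerId, k -> new ArrayList<>())
              .addAll(execs);
    }
}
```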
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java
index 5de265c..43ef699 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/ResourceAwareScheduler.java
@@ -123,7 +123,7 @@
                                   List<TopologyDetails> orderedTopologies) {
         //A copy of cluster that we can modify, but does not get committed back to cluster unless scheduling succeeds
         Cluster workingState = new Cluster(cluster);
-        RAS_Nodes nodes = new RAS_Nodes(workingState);
+        RasNodes nodes = new RasNodes(workingState);
         IStrategy rasStrategy = null;
         String strategyConf = (String) td.getConf().get(Config.TOPOLOGY_SCHEDULER_STRATEGY);
         try {
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/DefaultSchedulingPriorityStrategy.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/DefaultSchedulingPriorityStrategy.java
index 2e1ca79..4cc0a85 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/DefaultSchedulingPriorityStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/DefaultSchedulingPriorityStrategy.java
@@ -137,8 +137,8 @@
     }
 
     /**
-     * Comparator that sorts topologies by priority and then by submission time
-     * First sort by Topology Priority, if there is a tie for topology priority, topology uptime is used to sort
+     * Comparator that sorts topologies by priority and then by submission time.
+     * First sort by Topology Priority, if there is a tie for topology priority, topology uptime is used to sort.
      */
     private static class TopologyByPriorityAndSubmissionTimeComparator implements Comparator<TopologyDetails> {
 
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/FIFOSchedulingPriorityStrategy.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/FIFOSchedulingPriorityStrategy.java
index 229f5b5..0076c75 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/FIFOSchedulingPriorityStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/priority/FIFOSchedulingPriorityStrategy.java
@@ -20,6 +20,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+@SuppressWarnings("checkstyle:AbbreviationAsWordInName")
 public class FIFOSchedulingPriorityStrategy extends DefaultSchedulingPriorityStrategy {
     private static final Logger LOG = LoggerFactory.getLogger(FIFOSchedulingPriorityStrategy.class);
 
@@ -54,8 +55,8 @@
     }
 
     /**
-     * Comparator that sorts topologies by priority and then by submission time
-     * First sort by Topology Priority, if there is a tie for topology priority, topology uptime is used to sort
+     * Comparator that sorts topologies by priority and then by submission time.
+     * First sort by Topology Priority, if there is a tie for topology priority, topology uptime is used to sort.
      */
     private static class TopologyBySubmissionTimeComparator implements Comparator<TopologyDetails> {
 
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/BaseResourceAwareStrategy.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/BaseResourceAwareStrategy.java
index 15ddbf6..55279b8 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/BaseResourceAwareStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/BaseResourceAwareStrategy.java
@@ -39,13 +39,12 @@
 import org.apache.storm.scheduler.SchedulerAssignment;
 import org.apache.storm.scheduler.TopologyDetails;
 import org.apache.storm.scheduler.WorkerSlot;
-import org.apache.storm.scheduler.resource.RAS_Node;
-import org.apache.storm.scheduler.resource.RAS_Nodes;
+import org.apache.storm.scheduler.resource.RasNode;
+import org.apache.storm.scheduler.resource.RasNodes;
 import org.apache.storm.scheduler.resource.SchedulingResult;
 import org.apache.storm.scheduler.resource.SchedulingStatus;
 import org.apache.storm.scheduler.resource.normalization.NormalizedResourceOffer;
 import org.apache.storm.scheduler.resource.normalization.NormalizedResourceRequest;
-import org.apache.storm.scheduler.resource.normalization.ResourceMetrics;
 import org.apache.storm.shade.com.google.common.annotations.VisibleForTesting;
 import org.apache.storm.shade.com.google.common.collect.Sets;
 import org.slf4j.Logger;
@@ -54,18 +53,19 @@
 public abstract class BaseResourceAwareStrategy implements IStrategy {
     private static final Logger LOG = LoggerFactory.getLogger(BaseResourceAwareStrategy.class);
     protected Cluster cluster;
+    private boolean oneExecutorPerWorker = false;
     // Rack id to list of host names in that rack
     private Map<String, List<String>> networkTopography;
     private final Map<String, String> superIdToRack = new HashMap<>();
     private final Map<String, String> superIdToHostname = new HashMap<>();
-    private final Map<String, List<RAS_Node>> hostnameToNodes = new HashMap<>();
-    private final Map<String, List<RAS_Node>> rackIdToNodes = new HashMap<>();
-    protected RAS_Nodes nodes;
+    private final Map<String, List<RasNode>> hostnameToNodes = new HashMap<>();
+    private final Map<String, List<RasNode>> rackIdToNodes = new HashMap<>();
+    protected RasNodes nodes;
 
     @VisibleForTesting
     void prepare(Cluster cluster) {
         this.cluster = cluster;
-        nodes = new RAS_Nodes(cluster);
+        nodes = new RasNodes(cluster);
         networkTopography = cluster.getNetworkTopography();
         Map<String, String> hostToRack = new HashMap<>();
         for (Map.Entry<String, List<String>> entry : networkTopography.entrySet()) {
@@ -74,7 +74,7 @@
                 hostToRack.put(hostName, rackId);
             }
         }
-        for (RAS_Node node: nodes.getNodes()) {
+        for (RasNode node: nodes.getNodes()) {
             String superId = node.getId();
             String hostName = node.getHostname();
             String rackId = hostToRack.getOrDefault(hostName, DNSToSwitchMapping.DEFAULT_RACK);
@@ -91,6 +91,10 @@
         //NOOP
     }
 
+    protected void setOneExecutorPerWorker(boolean oneExecutorPerWorker) {
+        this.oneExecutorPerWorker = oneExecutorPerWorker;
+    }
+
     protected SchedulingResult mkNotEnoughResources(TopologyDetails td) {
         return  SchedulingResult.failure(
             SchedulingStatus.FAIL_NOT_ENOUGH_RESOURCES,
@@ -109,7 +113,7 @@
             ExecutorDetails exec, TopologyDetails td, Collection<ExecutorDetails> scheduledTasks, Iterable<String> sortedNodes) {
         WorkerSlot targetSlot = findWorkerForExec(exec, td, sortedNodes);
         if (targetSlot != null) {
-            RAS_Node targetNode = idToNode(targetSlot.getNodeId());
+            RasNode targetNode = idToNode(targetSlot.getNodeId());
             targetNode.assignSingleExecutor(targetSlot, exec, td);
             scheduledTasks.add(exec);
             LOG.debug(
@@ -133,8 +137,8 @@
     }
 
     protected abstract TreeSet<ObjectResources> sortObjectResources(
-        final AllResources allResources, ExecutorDetails exec, TopologyDetails topologyDetails,
-        final ExistingScheduleFunc existingScheduleFunc
+        AllResources allResources, ExecutorDetails exec, TopologyDetails topologyDetails,
+        ExistingScheduleFunc existingScheduleFunc
     );
 
     /**
@@ -146,11 +150,14 @@
      */
     protected WorkerSlot findWorkerForExec(ExecutorDetails exec, TopologyDetails td, Iterable<String> sortedNodes) {
         for (String id : sortedNodes) {
-            RAS_Node node = nodes.getNodeById(id);
+            RasNode node = nodes.getNodeById(id);
             if (node.couldEverFit(exec, td)) {
+                Collection<WorkerSlot> topologyUsedSlots = oneExecutorPerWorker ? node.getUsedSlots(td.getId()) : Collections.emptySet();
                 for (WorkerSlot ws : node.getSlotsAvailableToScheduleOn()) {
-                    if (node.wouldFit(ws, exec, td)) {
-                        return ws;
+                    if (!topologyUsedSlots.contains(ws)) {
+                        if (node.wouldFit(ws, exec, td)) {
+                            return ws;
+                        }
                     }
                 }
             }
@@ -176,12 +183,12 @@
      * @return a sorted list of nodes.
      */
     protected TreeSet<ObjectResources> sortNodes(
-            List<RAS_Node> availNodes, ExecutorDetails exec, TopologyDetails topologyDetails, String rackId,
+            List<RasNode> availNodes, ExecutorDetails exec, TopologyDetails topologyDetails, String rackId,
             Map<String, AtomicInteger> scheduledCount) {
         AllResources allRackResources = new AllResources("RACK");
         List<ObjectResources> nodes = allRackResources.objectResources;
 
-        for (RAS_Node rasNode : availNodes) {
+        for (RasNode rasNode : availNodes) {
             String superId = rasNode.getId();
             ObjectResources node = new ObjectResources(superId);
 
@@ -219,9 +226,9 @@
         }
         List<String> ret = new ArrayList<>(hosts.size());
         for (String host: hosts) {
-            List<RAS_Node> nodes = hostnameToNodes.get(host);
+            List<RasNode> nodes = hostnameToNodes.get(host);
             if (nodes != null) {
-                for (RAS_Node node : nodes) {
+                for (RasNode node : nodes) {
                     ret.add(node.getId());
                 }
             }
@@ -238,7 +245,7 @@
         private final Iterator<String> post;
         private final Set<String> skip;
 
-        public LazyNodeSortingIterator(LazyNodeSorting parent,
+        LazyNodeSortingIterator(LazyNodeSorting parent,
                                        TreeSet<ObjectResources> sortedRacks) {
             this.parent = parent;
             rackIterator = sortedRacks.iterator();
@@ -308,7 +315,7 @@
         private final List<String> unFavoredNodeIds;
         private final Set<String> skippedNodeIds = new HashSet<>();
 
-        public LazyNodeSorting(TopologyDetails td, ExecutorDetails exec,
+        LazyNodeSorting(TopologyDetails td, ExecutorDetails exec,
                                List<String> favoredNodeIds, List<String> unFavoredNodeIds) {
             this.favoredNodeIds = favoredNodeIds;
             this.unFavoredNodeIds = unFavoredNodeIds;
@@ -358,7 +365,7 @@
             ObjectResources rack = new ObjectResources(rackId);
             racks.add(rack);
             for (String nodeHost : nodeHosts) {
-                for (RAS_Node node : hostnameToNodes(nodeHost)) {
+                for (RasNode node : hostnameToNodes(nodeHost)) {
                     rack.availableResources.add(node.getTotalAvailableResources());
                     rack.totalResources.add(node.getTotalAvailableResources());
                 }
@@ -431,7 +438,7 @@
      * @param node the node to find out which rack its on
      * @return the rack id
      */
-    protected String nodeToRack(RAS_Node node) {
+    protected String nodeToRack(RasNode node) {
         return superIdToRack.get(node.getId());
     }
 
@@ -579,7 +586,7 @@
                 String rackId = clusterEntry.getKey();
                 LOG.debug("Rack: {}", rackId);
                 for (String nodeHostname : clusterEntry.getValue()) {
-                    for (RAS_Node node : hostnameToNodes(nodeHostname)) {
+                    for (RasNode node : hostnameToNodes(nodeHostname)) {
                         LOG.debug("-> Node: {} {}", node.getHostname(), node.getId());
                         LOG.debug(
                             "--> Avail Resources: {Mem {}, CPU {} Slots: {}}",
@@ -603,18 +610,18 @@
      * @param hostname the hostname.
     * @return the nodes on that host.
      */
-    public List<RAS_Node> hostnameToNodes(String hostname) {
+    public List<RasNode> hostnameToNodes(String hostname) {
         return hostnameToNodes.getOrDefault(hostname, Collections.emptyList());
     }
 
     /**
-     * Find RAS_Node for specified node id.
+     * Find RasNode for specified node id.
      *
      * @param id the node/supervisor id to lookup
-     * @return a RAS_Node object
+     * @return a RasNode object
      */
-    public RAS_Node idToNode(String id) {
-        RAS_Node ret = nodes.getNodeById(id);
+    public RasNode idToNode(String id) {
+        RasNode ret = nodes.getNodeById(id);
         if (ret == null) {
             LOG.error("Cannot find Node with Id: {}", id);
         }
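
The functional change in this file is the topologyUsedSlots filter inside findWorkerForExec: with oneExecutorPerWorker set, slots this topology already occupies on a node are skipped. A standalone sketch of that selection logic, with strings standing in for WorkerSlot and the wouldFit resource check reduced to a predicate:

```java
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.function.Predicate;

class SlotFilterSketch {
    static String findSlot(List<String> availableSlots, Set<String> topologyUsedSlots,
                           boolean oneExecutorPerWorker, Predicate<String> wouldFit) {
        // When the flag is off, nothing is excluded.
        Collection<String> excluded =
            oneExecutorPerWorker ? topologyUsedSlots : Collections.emptySet();
        for (String ws : availableSlots) {
            if (!excluded.contains(ws) && wouldFit.test(ws)) {
                return ws; // first acceptable slot, mirroring the loop above
            }
        }
        return null; // no slot found; the caller treats this as "not enough resources"
    }
}
```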
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/ConstraintSolverStrategy.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/ConstraintSolverStrategy.java
index 9f8265d..cacba7a 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/ConstraintSolverStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/ConstraintSolverStrategy.java
@@ -31,8 +31,8 @@
 import org.apache.storm.scheduler.SchedulerAssignment;
 import org.apache.storm.scheduler.TopologyDetails;
 import org.apache.storm.scheduler.WorkerSlot;
-import org.apache.storm.scheduler.resource.RAS_Node;
-import org.apache.storm.scheduler.resource.RAS_Nodes;
+import org.apache.storm.scheduler.resource.RasNode;
+import org.apache.storm.scheduler.resource.RasNodes;
 import org.apache.storm.scheduler.resource.SchedulingResult;
 import org.apache.storm.scheduler.resource.SchedulingStatus;
 import org.apache.storm.shade.com.google.common.annotations.VisibleForTesting;
@@ -49,7 +49,7 @@
     private Map<String, Map<String, Integer>> constraintMatrix;
     private HashSet<String> spreadComps = new HashSet<>();
 
-    private Map<String, RAS_Node> nodes;
+    private Map<String, RasNode> nodes;
     private Map<ExecutorDetails, String> execToComp;
     private Map<String, Set<ExecutorDetails>> compToExecs;
     private List<String> favoredNodeIds;
@@ -124,9 +124,9 @@
         return true;
     }
 
-    private static Map<WorkerSlot, RAS_Node> workerToNodes(Cluster cluster) {
-        Map<WorkerSlot, RAS_Node> workerToNodes = new HashMap<>();
-        for (RAS_Node node : RAS_Nodes.getAllNodesFrom(cluster).values()) {
+    private static Map<WorkerSlot, RasNode> workerToNodes(Cluster cluster) {
+        Map<WorkerSlot, RasNode> workerToNodes = new HashMap<>();
+        for (RasNode node : RasNodes.getAllNodesFrom(cluster).values()) {
             for (WorkerSlot s : node.getUsedSlots()) {
                 workerToNodes.put(s, node);
             }
@@ -141,15 +141,15 @@
         Map<ExecutorDetails, String> execToComp = topo.getExecutorToComponent();
         Map<WorkerSlot, HashSet<ExecutorDetails>> workerExecMap = new HashMap<>();
         Map<WorkerSlot, HashSet<String>> workerCompMap = new HashMap<>();
-        Map<RAS_Node, HashSet<String>> nodeCompMap = new HashMap<>();
-        Map<WorkerSlot, RAS_Node> workerToNodes = workerToNodes(cluster);
+        Map<RasNode, HashSet<String>> nodeCompMap = new HashMap<>();
+        Map<WorkerSlot, RasNode> workerToNodes = workerToNodes(cluster);
         boolean ret = true;
 
         HashSet<String> spreadComps = getSpreadComps(topo);
         for (Map.Entry<ExecutorDetails, WorkerSlot> entry : result.entrySet()) {
             ExecutorDetails exec = entry.getKey();
             WorkerSlot worker = entry.getValue();
-            RAS_Node node = workerToNodes.get(worker);
+            RasNode node = workerToNodes.get(worker);
 
             if (workerExecMap.computeIfAbsent(worker, (k) -> new HashSet<>()).contains(exec)) {
                 LOG.error("Incorrect Scheduling: Found duplicate in scheduling");
@@ -177,9 +177,9 @@
         LOG.info("Checking Resources...");
         assert (cluster.getAssignmentById(topo.getId()) != null);
         Map<ExecutorDetails, WorkerSlot> result = cluster.getAssignmentById(topo.getId()).getExecutorToSlot();
-        Map<RAS_Node, Collection<ExecutorDetails>> nodeToExecs = new HashMap<>();
+        Map<RasNode, Collection<ExecutorDetails>> nodeToExecs = new HashMap<>();
         Map<ExecutorDetails, WorkerSlot> mergedExecToWorker = new HashMap<>();
-        Map<String, RAS_Node> nodes = RAS_Nodes.getAllNodesFrom(cluster);
+        Map<String, RasNode> nodes = RasNodes.getAllNodesFrom(cluster);
         //merge with existing assignments
         if (cluster.getAssignmentById(topo.getId()) != null
             && cluster.getAssignmentById(topo.getId()).getExecutorToSlot() != null) {
@@ -190,7 +190,7 @@
         for (Map.Entry<ExecutorDetails, WorkerSlot> entry : mergedExecToWorker.entrySet()) {
             ExecutorDetails exec = entry.getKey();
             WorkerSlot worker = entry.getValue();
-            RAS_Node node = nodes.get(worker.getNodeId());
+            RasNode node = nodes.get(worker.getNodeId());
 
             if (node.getAvailableMemoryResources() < 0.0 && node.getAvailableCpuResources() < 0.0) {
                 LOG.error("Incorrect Scheduling: found node with negative available resources");
@@ -199,8 +199,8 @@
             nodeToExecs.computeIfAbsent(node, (k) -> new HashSet<>()).add(exec);
         }
 
-        for (Map.Entry<RAS_Node, Collection<ExecutorDetails>> entry : nodeToExecs.entrySet()) {
-            RAS_Node node = entry.getKey();
+        for (Map.Entry<RasNode, Collection<ExecutorDetails>> entry : nodeToExecs.entrySet()) {
+            RasNode node = entry.getKey();
             Collection<ExecutorDetails> execs = entry.getValue();
             double cpuUsed = 0.0;
             double memoryUsed = 0.0;
@@ -244,9 +244,9 @@
     public SchedulingResult schedule(Cluster cluster, TopologyDetails td) {
         prepare(cluster);
         LOG.debug("Scheduling {}", td.getId());
-        nodes = RAS_Nodes.getAllNodesFrom(cluster);
+        nodes = RasNodes.getAllNodesFrom(cluster);
         Map<WorkerSlot, Set<String>> workerCompAssignment = new HashMap<>();
-        Map<RAS_Node, Set<String>> nodeCompAssignment = new HashMap<>();
+        Map<RasNode, Set<String>> nodeCompAssignment = new HashMap<>();
 
         int confMaxStateSearch = ObjectReader.getInt(td.getConf().get(Config.TOPOLOGY_RAS_CONSTRAINT_MAX_STATE_SEARCH));
         int daemonMaxStateSearch = ObjectReader.getInt(cluster.getConf().get(DaemonConfig.RESOURCE_AWARE_SCHEDULER_MAX_STATE_SEARCH));
@@ -279,7 +279,7 @@
         if (existingAssignment != null) {
             existingAssignment.getExecutorToSlot().forEach((exec, ws) -> {
                 String compId = execToComp.get(exec);
-                RAS_Node node = nodes.get(ws.getNodeId());
+                RasNode node = nodes.get(ws.getNodeId());
                 //populate node to component Assignments
                 nodeCompAssignment.computeIfAbsent(node, (k) -> new HashSet<>()).add(compId);
                 //populate worker to comp assignments
@@ -337,7 +337,7 @@
         Iterable<String> sortedNodes = sortAllNodes(state.td, exec, favoredNodeIds, unFavoredNodeIds);
 
         for (String nodeId: sortedNodes) {
-            RAS_Node node = nodes.get(nodeId);
+            RasNode node = nodes.get(nodeId);
             for (WorkerSlot workerSlot : node.getSlotsAvailableToScheduleOn()) {
                 if (isExecAssignmentToWorkerValid(workerSlot, state)) {
                     state.tryToSchedule(execToComp, node, workerSlot);
@@ -374,7 +374,7 @@
     public boolean isExecAssignmentToWorkerValid(WorkerSlot worker, SearcherState state) {
         final ExecutorDetails exec = state.currentExec();
         //check resources
-        RAS_Node node = nodes.get(worker.getNodeId());
+        RasNode node = nodes.get(worker.getNodeId());
         if (!node.wouldFit(worker, exec, state.td)) {
             LOG.trace("{} would not fit in resources available on {}", exec, worker);
             return false;
@@ -481,7 +481,7 @@
         private final Map<WorkerSlot, Set<String>> workerCompAssignment;
         private final boolean[] okToRemoveFromWorker;
         // for the currently tested assignment a Map of the node to the components on it to be able to enforce constraints
-        private final Map<RAS_Node, Set<String>> nodeCompAssignment;
+        private final Map<RasNode, Set<String>> nodeCompAssignment;
         private final boolean[] okToRemoveFromNode;
         // Static State
         // The list of all executors (preferably sorted to make assignments simpler).
@@ -499,7 +499,7 @@
         // The current executor we are trying to schedule
         private int execIndex = 0;
 
-        private SearcherState(Map<WorkerSlot, Set<String>> workerCompAssignment, Map<RAS_Node, Set<String>> nodeCompAssignment,
+        private SearcherState(Map<WorkerSlot, Set<String>> workerCompAssignment, Map<RasNode, Set<String>> nodeCompAssignment,
                               int maxStatesSearched, long maxTimeMs, List<ExecutorDetails> execs, TopologyDetails td) {
             assert execs != null;
             assert !execs.isEmpty();
@@ -551,7 +551,7 @@
             return execs.get(execIndex);
         }
 
-        public void tryToSchedule(Map<ExecutorDetails, String> execToComp, RAS_Node node, WorkerSlot workerSlot) {
+        public void tryToSchedule(Map<ExecutorDetails, String> execToComp, RasNode node, WorkerSlot workerSlot) {
             ExecutorDetails exec = currentExec();
             String comp = execToComp.get(exec);
             LOG.trace("Trying assignment of {} {} to {}", exec, comp, workerSlot);
@@ -561,7 +561,7 @@
             node.assignSingleExecutor(workerSlot, exec, td);
         }
 
-        public void backtrack(Map<ExecutorDetails, String> execToComp, RAS_Node node, WorkerSlot workerSlot) {
+        public void backtrack(Map<ExecutorDetails, String> execToComp, RasNode node, WorkerSlot workerSlot) {
             execIndex--;
             if (execIndex < 0) {
                 throw new IllegalStateException("Internal Error: exec index became negative");
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/DefaultResourceAwareStrategy.java b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/DefaultResourceAwareStrategy.java
index 6c3c1f7..9b2a7bd 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/DefaultResourceAwareStrategy.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/resource/strategies/scheduling/DefaultResourceAwareStrategy.java
@@ -39,6 +39,9 @@
 
     @Override
     public SchedulingResult schedule(Cluster cluster, TopologyDetails td) {
+        boolean oneExecutorPerWorker = (Boolean) td.getConf().get(Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER);
+        setOneExecutorPerWorker(oneExecutorPerWorker);
+
         prepare(cluster);
         if (nodes.getNodes().size() <= 0) {
             LOG.warn("No available nodes to schedule tasks on!");
diff --git a/storm-server/src/main/java/org/apache/storm/scheduler/utils/ArtifactoryConfigLoader.java b/storm-server/src/main/java/org/apache/storm/scheduler/utils/ArtifactoryConfigLoader.java
index cfb6bb7..e8f8a94 100644
--- a/storm-server/src/main/java/org/apache/storm/scheduler/utils/ArtifactoryConfigLoader.java
+++ b/storm-server/src/main/java/org/apache/storm/scheduler/utils/ArtifactoryConfigLoader.java
@@ -64,7 +64,7 @@
     private int lastReturnedTime = 0;
     private int timeoutSeconds = DEFAULT_TIMEOUT_SECS;
     private Map<String, Object> lastReturnedValue;
-    private URI targetURI = null;
+    private URI targetUri = null;
     private JSONParser jsonParser;
     private String scheme;
 
@@ -87,8 +87,8 @@
             LOG.error("No URI defined in {} configuration.", DaemonConfig.SCHEDULER_CONFIG_LOADER_URI);
         } else {
             try {
-                targetURI = new URI(uriString);
-                scheme = targetURI.getScheme().substring(ARTIFACTORY_SCHEME_PREFIX.length());
+                targetUri = new URI(uriString);
+                scheme = targetUri.getScheme().substring(ARTIFACTORY_SCHEME_PREFIX.length());
             } catch (URISyntaxException e) {
                 LOG.error("Failed to parse uri={}", uriString);
             }
@@ -103,7 +103,7 @@
      */
     @Override
     public Map<String, Object> load(String configKey) {
-        if (targetURI == null) {
+        if (targetUri == null) {
             return null;
         }
 
@@ -116,25 +116,25 @@
         }
 
         try {
-            Map<String, Object> raw = loadFromURI(targetURI);
+            Map<String, Object> raw = loadFromUri(targetUri);
             if (raw != null) {
                 return (Map<String, Object>) raw.get(configKey);
             }
         } catch (Exception e) {
-            LOG.error("Failed to load from uri {}", targetURI);
+            LOG.error("Failed to load from uri {}", targetUri);
         }
         return null;
     }
 
     /**
+     * Protected so we can override this in unit tests.
+     *
      * @param api null if we are trying to download artifact, otherwise a string to call REST api,
      *        e.g. "/api/storage"
      * @param artifact location of artifact
      * @param host Artifactory hostname
      * @param port Artifactory port
      * @return null on failure or the response string if return code is in 200 range
-     *
-     * <p>Protected so we can override this in unit tests
      */
     protected String doGet(String api, String artifact, String host, Integer port) {
         URIBuilder builder = new URIBuilder().setScheme(scheme).setHost(host).setPort(port);
@@ -158,7 +158,7 @@
             LOG.debug("About to issue a GET to {}", builder);
             HttpGet httpget = new HttpGet(builder.build());
             String responseBody;
-            responseBody = httpclient.execute(httpget, GETStringResponseHandler.getInstance());
+            responseBody = httpclient.execute(httpget, GetStringResponseHandler.getInstance());
             returnValue = responseBody;
         } catch (Exception e) {
             LOG.error("Received exception while connecting to Artifactory", e);
@@ -196,10 +196,10 @@
             LOG.error("got null metadata");
             return null;
         }
-        String downloadURI = (String) json.get("downloadUri");
+        String downloadUri = (String) json.get("downloadUri");
 
         // This means we are pointing at a file.
-        if (downloadURI != null) {
+        if (downloadUri != null) {
             // Then get it and return the file as string.
             String returnValue = doGet(null, location, host, port);
             saveInArtifactoryCache(returnValue);
@@ -298,7 +298,7 @@
         cacheInitialized = true;
     }
 
-    private Map<String, Object> loadFromURI(URI uri) throws IOException {
+    private Map<String, Object> loadFromUri(URI uri) throws IOException {
         String host = uri.getHost();
         Integer port = uri.getPort();
         String location = uri.getPath();
@@ -342,20 +342,22 @@
     /**
      * A private class used to check the response coming back from httpclient.
      */
-    private static class GETStringResponseHandler implements ResponseHandler<String> {
-        private static GETStringResponseHandler singleton = null;
+    private static class GetStringResponseHandler implements ResponseHandler<String> {
+        private static GetStringResponseHandler singleton = null;
 
         /**
+         * Get instance.
          * @return a singleton httpclient GET response handler
          */
-        public static GETStringResponseHandler getInstance() {
+        public static GetStringResponseHandler getInstance() {
             if (singleton == null) {
-                singleton = new GETStringResponseHandler();
+                singleton = new GetStringResponseHandler();
             }
             return singleton;
         }
 
         /**
+         * Handle response.
          * @param response The http response to verify.
          * @return null on failure or the response string if return code is in 200 range
          */
@@ -374,6 +376,7 @@
     }
 
     private class DirEntryCompare implements Comparator<JSONObject> {
+
         @Override
         public int compare(JSONObject o1, JSONObject o2) {
             return ((String) o1.get("uri")).compareTo((String) o2.get("uri"));
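
One thing the rename does not touch: GetStringResponseHandler keeps a lazily initialized static singleton with no synchronization. That is fine if it is only ever reached from one thread; if not, the initialization-on-demand holder idiom is the usual alternative — a sketch, not something this patch changes:

```java
// Holder idiom: the JVM guarantees Holder.INSTANCE is initialized exactly once,
// on first use, with no explicit locking.
class LazySingleton {
    private LazySingleton() {
    }

    private static class Holder {
        private static final LazySingleton INSTANCE = new LazySingleton();
    }

    static LazySingleton getInstance() {
        return Holder.INSTANCE;
    }
}
```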
diff --git a/storm-server/src/main/java/org/apache/storm/security/auth/DefaultHttpCredentialsPlugin.java b/storm-server/src/main/java/org/apache/storm/security/auth/DefaultHttpCredentialsPlugin.java
index b6f4b3b..1da3a9b 100644
--- a/storm-server/src/main/java/org/apache/storm/security/auth/DefaultHttpCredentialsPlugin.java
+++ b/storm-server/src/main/java/org/apache/storm/security/auth/DefaultHttpCredentialsPlugin.java
@@ -26,7 +26,7 @@
         LoggerFactory.getLogger(DefaultHttpCredentialsPlugin.class);
 
     /**
-     * No-op
+     * No-op.
      *
      * @param topoConf Storm configuration
      */
diff --git a/storm-server/src/main/java/org/apache/storm/security/auth/IHttpCredentialsPlugin.java b/storm-server/src/main/java/org/apache/storm/security/auth/IHttpCredentialsPlugin.java
index b14ce85..ad7daaa 100644
--- a/storm-server/src/main/java/org/apache/storm/security/auth/IHttpCredentialsPlugin.java
+++ b/storm-server/src/main/java/org/apache/storm/security/auth/IHttpCredentialsPlugin.java
@@ -22,11 +22,12 @@
 import javax.servlet.http.HttpServletRequest;
 
 /**
- * Interface for handling credentials in an HttpServletRequest
+ * Interface for handling credentials in an HttpServletRequest.
  */
 public interface IHttpCredentialsPlugin {
+
     /**
-     * Invoked once immediately after construction
+     * Invoked once immediately after construction.
      *
      * @param topoConf Storm configuration
      */
diff --git a/storm-server/src/main/java/org/apache/storm/security/auth/ServerAuthUtils.java b/storm-server/src/main/java/org/apache/storm/security/auth/ServerAuthUtils.java
index 2b1c299..31c9f3c 100644
--- a/storm-server/src/main/java/org/apache/storm/security/auth/ServerAuthUtils.java
+++ b/storm-server/src/main/java/org/apache/storm/security/auth/ServerAuthUtils.java
@@ -39,7 +39,7 @@
     }
 
     /**
-     * Construct an HttpServletRequest credential plugin specified by the UI storm configuration
+     * Construct an HttpServletRequest credential plugin specified by the UI storm configuration.
      *
      * @param conf storm configuration
      * @return the plugin
@@ -50,7 +50,7 @@
     }
 
     /**
-     * Construct an HttpServletRequest credential plugin specified by the DRPC storm configuration
+     * Construct an HttpServletRequest credential plugin specified by the DRPC storm configuration.
      *
      * @param conf storm configuration
      * @return the plugin
diff --git a/storm-server/src/main/java/org/apache/storm/security/auth/workertoken/WorkerTokenManager.java b/storm-server/src/main/java/org/apache/storm/security/auth/workertoken/WorkerTokenManager.java
index a87d448..1490328 100644
--- a/storm-server/src/main/java/org/apache/storm/security/auth/workertoken/WorkerTokenManager.java
+++ b/storm-server/src/main/java/org/apache/storm/security/auth/workertoken/WorkerTokenManager.java
@@ -128,9 +128,9 @@
      * @param topologyId the topology the credentials are for
      */
     public void upsertWorkerTokensInCredsForTopo(Map<String, String> creds, String user, String topologyId) {
-        Arrays.stream(WorkerTokenServiceType.values()).filter(type -> shouldRenewWorkerToken(creds, type))
-              .forEach(type -> {ClientAuthUtils.setWorkerToken(creds, createOrUpdateTokenFor(type, user, topologyId));
-              });
+        Arrays.stream(WorkerTokenServiceType.values())
+                .filter(type -> shouldRenewWorkerToken(creds, type))
+                .forEach(type -> ClientAuthUtils.setWorkerToken(creds, createOrUpdateTokenFor(type, user, topologyId)));
     }
 
     @VisibleForTesting
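
The reformatted pipeline above is the plain filter-then-forEach shape; the old version wrapped a statement in a braced lambda body, which is what the style checker objected to. A minimal analogue, with a hypothetical Service enum standing in for WorkerTokenServiceType:

```java
import java.util.Arrays;

class StreamShapeSketch {
    enum Service { NIMBUS, DRPC, SUPERVISOR }

    public static void main(String[] args) {
        Arrays.stream(Service.values())
                .filter(type -> type != Service.DRPC)       // shouldRenewWorkerToken analogue
                .forEach(type -> System.out.println(type)); // setWorkerToken analogue
    }
}
```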
diff --git a/storm-server/src/main/java/org/apache/storm/stats/StatsUtil.java b/storm-server/src/main/java/org/apache/storm/stats/StatsUtil.java
index 05d4edd..79d8046 100644
--- a/storm-server/src/main/java/org/apache/storm/stats/StatsUtil.java
+++ b/storm-server/src/main/java/org/apache/storm/stats/StatsUtil.java
@@ -265,8 +265,6 @@
         Map win2sid2acked = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, ACKED), TO_STRING);
         Map win2sid2failed = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, FAILED), TO_STRING);
         Map win2sid2emitted = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, EMITTED), TO_STRING);
-        Map win2sid2transferred = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, TRANSFERRED), TO_STRING);
-        Map win2sid2compLat = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, COMP_LATENCIES), TO_STRING);
 
         outputStats.put(ACKED, win2sid2acked.get(window));
         outputStats.put(FAILED, win2sid2failed.get(window));
@@ -276,6 +274,7 @@
         }
         outputStats.put(EMITTED, filterSysStreams2Stat(sid2emitted, includeSys));
 
+        Map win2sid2transferred = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, TRANSFERRED), TO_STRING);
         Map<String, Long> sid2transferred = (Map) win2sid2transferred.get(window);
         if (sid2transferred == null) {
             sid2transferred = new HashMap<>();
@@ -283,6 +282,7 @@
         outputStats.put(TRANSFERRED, filterSysStreams2Stat(sid2transferred, includeSys));
         outputStats = swapMapOrder(outputStats);
 
+        Map win2sid2compLat = windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, COMP_LATENCIES), TO_STRING);
         Map sid2compLat = (Map) win2sid2compLat.get(window);
         Map sid2acked = (Map) win2sid2acked.get(window);
         mergeMaps(outputStats, aggSpoutStreamsLatAndCount(sid2compLat, sid2acked));
@@ -301,8 +301,6 @@
      */
     public static <K, V extends Number> Map<String, Object> aggPreMergeTopoPageBolt(
         Map<String, Object> beat, String window, boolean includeSys) {
-        Map<String, Object> ret = new HashMap<>();
-
         Map<String, Object> subRet = new HashMap<>();
         subRet.put(NUM_EXECUTORS, 1);
         subRet.put(NUM_TASKS, beat.get(NUM_TASKS));
@@ -334,6 +332,7 @@
         subRet.putAll(aggBoltLatAndCount(
             win2sid2execLat.get(window), win2sid2procLat.get(window), win2sid2exec.get(window)));
 
+        Map<String, Object> ret = new HashMap<>();
         ret.put((String) beat.get("comp-id"), subRet);
         return ret;
     }
@@ -343,8 +342,6 @@
      */
     public static <K, V extends Number> Map<String, Object> aggPreMergeTopoPageSpout(
         Map<String, Object> m, String window, boolean includeSys) {
-        Map<String, Object> ret = new HashMap<>();
-
         Map<String, Object> subRet = new HashMap<>();
         subRet.put(NUM_EXECUTORS, 1);
         subRet.put(NUM_TASKS, m.get(NUM_TASKS));
@@ -372,6 +369,7 @@
             windowSetConverter(ClientStatsUtil.getMapByKey(stat2win2sid2num, ACKED), TO_STRING);
         subRet.putAll(aggSpoutLatAndCount(win2sid2compLat.get(window), win2sid2acked.get(window)));
 
+        Map<String, Object> ret = new HashMap<>();
         ret.put((String) m.get("comp-id"), subRet);
         return ret;
     }
@@ -522,8 +520,6 @@
      */
     public static Map<String, Object> aggTopoExecStats(
         String window, boolean includeSys, Map<String, Object> accStats, Map<String, Object> beat, String compType) {
-        Map<String, Object> ret = new HashMap<>();
-
         boolean isSpout = compType.equals(ClientStatsUtil.SPOUT);
         // component id -> stats
         Map<String, Object> cid2stats;
@@ -552,6 +548,7 @@
             w2acked = aggregateCountStreams(ClientStatsUtil.getMapByKey(stats, ACKED));
         }
 
+        Map<String, Object> ret = new HashMap<>();
         Set workerSet = (Set) accStats.get(WORKERS_SET);
         workerSet.add(Lists.newArrayList(beat.get(HOST), beat.get(PORT)));
         ret.put(WORKERS_SET, workerSet);
diff --git a/storm-server/src/main/java/org/apache/storm/testing/CompleteTopologyParam.java b/storm-server/src/main/java/org/apache/storm/testing/CompleteTopologyParam.java
index eafb315..5ad60fb 100644
--- a/storm-server/src/main/java/org/apache/storm/testing/CompleteTopologyParam.java
+++ b/storm-server/src/main/java/org/apache/storm/testing/CompleteTopologyParam.java
@@ -21,24 +21,24 @@
  */
 public class CompleteTopologyParam {
     /**
-     * The mocked spout sources
+     * The mocked spout sources.
      */
     private MockedSources mockedSources = new MockedSources();
     /**
-     * the config for the topology when it was submitted to the cluster
+     * the config for the topology when it was submitted to the cluster.
      */
     private Map<String, Object> topoConf = new Config();
     /**
-     * whether cleanup the state?
+     * Indicates whether to clean up the state.
      */
     private boolean cleanupState = true;
     /**
-     * the topology name you want to submit to the cluster
+     * the topology name you want to submit to the cluster.
      */
     private String topologyName;
 
     /**
-     * the timeout of topology you want to submit to the cluster
+     * the timeout for the topology you want to submit to the cluster.
      */
     private int timeoutMs = Testing.TEST_TIMEOUT_MS;
 
diff --git a/storm-server/src/main/java/org/apache/storm/testing/InProcessZookeeper.java b/storm-server/src/main/java/org/apache/storm/testing/InProcessZookeeper.java
index c3265df..6539517 100644
--- a/storm-server/src/main/java/org/apache/storm/testing/InProcessZookeeper.java
+++ b/storm-server/src/main/java/org/apache/storm/testing/InProcessZookeeper.java
@@ -34,6 +34,7 @@
     }
 
     /**
+     * Get port.
      * @return the port ZK is listening on (localhost)
      */
     public long getPort() {
diff --git a/storm-server/src/main/java/org/apache/storm/testing/TestJob.java b/storm-server/src/main/java/org/apache/storm/testing/TestJob.java
index 9c118e8..fc636b5 100644
--- a/storm-server/src/main/java/org/apache/storm/testing/TestJob.java
+++ b/storm-server/src/main/java/org/apache/storm/testing/TestJob.java
@@ -19,7 +19,7 @@
  * we put our java unit testing logic in the run method. A sample
  * code will be:
  *
- * ```java
+ * <p>```java
  * Testing.withSimulatedTimeLocalCluster(new TestJob() {
  *     public void run(Cluster cluster) {
  *         // your testing logic here.
@@ -34,5 +34,5 @@
      * @param cluster the cluster which created by <code>Testing.withSimulatedTimeLocalCluster</code>
      *        and <code>Testing.withTrackedCluster</code>.
      */
-    public void run(ILocalCluster cluster) throws Exception;
+    void run(ILocalCluster cluster) throws Exception;
 }
diff --git a/storm-server/src/main/java/org/apache/storm/testing/TrackedTopology.java b/storm-server/src/main/java/org/apache/storm/testing/TrackedTopology.java
index 1c6f738..4719ce6 100644
--- a/storm-server/src/main/java/org/apache/storm/testing/TrackedTopology.java
+++ b/storm-server/src/main/java/org/apache/storm/testing/TrackedTopology.java
@@ -12,6 +12,8 @@
 
 package org.apache.storm.testing;
 
+import static org.apache.storm.Testing.whileTimeout;
+
 import java.util.Random;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadLocalRandom;
@@ -29,8 +31,6 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.storm.Testing.whileTimeout;
-
 /**
  * A tracked topology keeps metrics for every bolt and spout.
  * This allows a test to know how many tuples have been fully processed.
@@ -79,7 +79,7 @@
     }
 
     /**
-     * Wait for 1 tuple to be fully processed
+     * Wait for 1 tuple to be fully processed.
      */
     public void trackedWait() {
         trackedWait(1, Testing.TEST_TIMEOUT_MS);
@@ -100,29 +100,30 @@
         final String id = cluster.getTrackedId();
         Random rand = ThreadLocalRandom.current();
         whileTimeout(timeoutMs,
-                     () -> {
-                         int se = globalAmt(id, "spout-emitted");
-                         int transferred = globalAmt(id, "transferred");
-                         int processed = globalAmt(id, "processed");
-                         LOG.info("emitted {} target {} transferred {} processed {}", se, target, transferred, processed);
-                         return (target != se) || (transferred != processed);
-                     },
-                     () -> {
-                         Time.advanceTimeSecs(1);
-                         try {
-                             Thread.sleep(rand.nextInt(200));
-                         } catch (Exception e) {
-                             throw new RuntimeException(e);
-                         }
-                     });
+            () -> {
+                int se = globalAmt(id, "spout-emitted");
+                int transferred = globalAmt(id, "transferred");
+                int processed = globalAmt(id, "processed");
+                LOG.info("emitted {} target {} transferred {} processed {}", se, target, transferred, processed);
+                return (target != se) || (transferred != processed);
+            },
+            () -> {
+                Time.advanceTimeSecs(1);
+                try {
+                    Thread.sleep(rand.nextInt(200));
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            });
         lastSpoutCommit.set(target);
     }
 
     /**
-     * Read a metric from the tracked cluster (NOT JUST THIS TOPOLOGY)
+     * Read a metric from the tracked cluster (NOT JUST THIS TOPOLOGY).
      * @param key one of "spout-emitted", "processed", or "transferred"
      * @return the amount of that metric
      */
+    @SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder")
     public int globalAmt(String key) {
         return globalAmt(cluster.getTrackedId(), key);
     }
diff --git a/storm-server/src/main/java/org/apache/storm/utils/ServerConfigUtils.java b/storm-server/src/main/java/org/apache/storm/utils/ServerConfigUtils.java
index a96ddb6..2c932b8 100644
--- a/storm-server/src/main/java/org/apache/storm/utils/ServerConfigUtils.java
+++ b/storm-server/src/main/java/org/apache/storm/utils/ServerConfigUtils.java
@@ -81,6 +81,10 @@
         return ret;
     }
 
+    public static String masterStormDistRoot(Map<String, Object> conf, String stormId) throws IOException {
+        return (masterStormDistRoot(conf) + FILE_SEPARATOR + stormId);
+    }
+
    /* TODO: make sure to test these two functions in manual tests */
     public static List<String> getTopoLogsUsers(Map<String, Object> topologyConf) {
         List<String> logsUsers = (List<String>) topologyConf.get(DaemonConfig.LOGS_USERS);
@@ -128,10 +132,6 @@
         return ret;
     }
 
-    public static String masterStormDistRoot(Map<String, Object> conf, String stormId) throws IOException {
-        return (masterStormDistRoot(conf) + FILE_SEPARATOR + stormId);
-    }
-
     public static String supervisorTmpDir(Map<String, Object> conf) throws IOException {
         String ret = ConfigUtils.supervisorLocalDir(conf) + FILE_SEPARATOR + "tmp";
         FileUtils.forceMkdir(new File(ret));
diff --git a/storm-server/src/main/java/org/apache/storm/utils/ServerUtils.java b/storm-server/src/main/java/org/apache/storm/utils/ServerUtils.java
index 85ade01..c026160 100644
--- a/storm-server/src/main/java/org/apache/storm/utils/ServerUtils.java
+++ b/storm-server/src/main/java/org/apache/storm/utils/ServerUtils.java
@@ -29,6 +29,7 @@
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.RandomAccessFile;
+import java.net.URL;
 import java.nio.file.FileSystems;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -126,11 +127,16 @@
         return null;
     }
 
-    public static BlobStore getNimbusBlobStore(Map<String, Object> conf, NimbusInfo nimbusInfo, ILeaderElector leaderElector) {
+    public static BlobStore getNimbusBlobStore(Map<String, Object> conf,
+            NimbusInfo nimbusInfo,
+            ILeaderElector leaderElector) {
         return getNimbusBlobStore(conf, null, nimbusInfo, leaderElector);
     }
 
-    public static BlobStore getNimbusBlobStore(Map<String, Object> conf, String baseDir, NimbusInfo nimbusInfo, ILeaderElector leaderElector) {
+    public static BlobStore getNimbusBlobStore(Map<String, Object> conf,
+            String baseDir,
+            NimbusInfo nimbusInfo,
+            ILeaderElector leaderElector) {
         String type = (String)conf.get(DaemonConfig.NIMBUS_BLOBSTORE);
         if (type == null) {
             type = LocalFsBlobStore.class.getName();
@@ -174,7 +180,7 @@
      * @param dir The input dir to get the disk space of this local dir
      * @return The total disk space of the input local directory
      */
-    public static long getDU(File dir) {
+    public static long getDiskUsage(File dir) {
         long size = 0;
         if (!dir.exists()) {
             return 0;
@@ -192,7 +198,7 @@
                         isSymLink = true;
                     }
                     if (!isSymLink) {
-                        size += getDU(allFiles[i]);
+                        size += getDiskUsage(allFiles[i]);
                     }
                 }
             }
@@ -214,13 +220,6 @@
 
     /**
      * Meant to be called only by the supervisor for stormjar/stormconf/stormcode files.
-     *
-     * @param key
-     * @param localFile
-     * @param cb
-     * @throws AuthorizationException
-     * @throws KeyNotFoundException
-     * @throws IOException
      */
     public static void downloadResourcesAsSupervisor(String key, String localFile,
                                                      ClientBlobStore cb) throws AuthorizationException, KeyNotFoundException, IOException {
@@ -236,6 +235,14 @@
         return _instance.currentClasspathImpl();
     }
 
+
+    /**
+     * Returns the URL of the named resource, looked up via the current thread's classloader.
+     */
+    public static URL getResourceFromClassloader(String name) {
+        return _instance.getResourceFromClassloaderImpl(name);
+    }
+
     /**
      * Determines if a zip archive contains a particular directory.
      *
@@ -384,8 +391,7 @@
      */
     private static void ensureDirectory(File dir) throws IOException {
         if (!dir.mkdirs() && !dir.isDirectory()) {
-            throw new IOException("Mkdirs failed to create " +
-                                  dir.toString());
+            throw new IOException("Mkdirs failed to create " + dir.toString());
         }
     }
 
@@ -394,10 +400,9 @@
      * <p/>
      * This utility will untar ".tar" files and ".tar.gz","tgz" files.
      *
-     * @param inFile   The tar file as input.
-     * @param untarDir The untar directory where to untar the tar file.
-     * @param symlinksDisabled true if symlinks should be disabled, else false.
-     * @throws IOException
+     * @param inFile   The tar file as input
+     * @param untarDir The untar directory where to untar the tar file
+     * @param symlinksDisabled true if symlinks should be disabled, else false
      */
     public static void unTar(File inFile, File untarDir, boolean symlinksDisabled) throws IOException {
         ensureDirectory(untarDir);
@@ -437,8 +442,10 @@
         shexec.execute();
         int exitcode = shexec.getExitCode();
         if (exitcode != 0) {
-            throw new IOException("Error untarring file " + inFile +
-                                  ". Tar process exited with exit code " + exitcode);
+            throw new IOException("Error untarring file "
+                    + inFile
+                    + ". Tar process exited with exit code "
+                    + exitcode);
         }
     }
 
@@ -548,18 +555,18 @@
 
     public static void unpack(File localrsrc, File dst, boolean symLinksDisabled) throws IOException {
         String lowerDst = localrsrc.getName().toLowerCase();
-        if (lowerDst.endsWith(".jar") ||
-            lowerDst.endsWith("_jar")) {
+        if (lowerDst.endsWith(".jar")
+                || lowerDst.endsWith("_jar")) {
             unJar(localrsrc, dst);
-        } else if (lowerDst.endsWith(".zip") ||
-            lowerDst.endsWith("_zip")) {
+        } else if (lowerDst.endsWith(".zip")
+                || lowerDst.endsWith("_zip")) {
             unZip(localrsrc, dst);
-        } else if (lowerDst.endsWith(".tar.gz") ||
-            lowerDst.endsWith("_tar_gz") ||
-            lowerDst.endsWith(".tgz") ||
-            lowerDst.endsWith("_tgz") ||
-            lowerDst.endsWith(".tar") ||
-            lowerDst.endsWith("_tar")) {
+        } else if (lowerDst.endsWith(".tar.gz")
+                || lowerDst.endsWith("_tar_gz")
+                || lowerDst.endsWith(".tgz")
+                || lowerDst.endsWith("_tgz")
+                || lowerDst.endsWith(".tar")
+                || lowerDst.endsWith("_tar")) {
             unTar(localrsrc, dst, symLinksDisabled);
         } else {
             LOG.warn("Cannot unpack " + localrsrc);
@@ -577,10 +584,10 @@
      * Extracts the given file to the given directory. Only zip entries starting with the given prefix are extracted.
      * The prefix is stripped off entry names before extraction.
      *
-     * @param zipFile The zip file to extract.
-     * @param toDir The directory to extract to.
+     * @param zipFile The zip file to extract
+     * @param toDir The directory to extract to
      * @param prefix The prefix to look for in the zip file. If not null only paths starting with the prefix will be
-     * extracted.
+     *     extracted
      */
     public static void extractZipFile(ZipFile zipFile, File toDir, String prefix) throws IOException {
         ensureDirectory(toDir);
@@ -622,8 +629,7 @@
      * Given a File input it will unzip the file in a the unzip directory passed as the second parameter.
      *
      * @param inFile   The zip file as input
-     * @param toDir The unzip directory where to unzip the zip file.
-     * @throws IOException
+     * @param toDir The unzip directory where to unzip the zip file
      */
     public static void unZip(File inFile, File toDir) throws IOException {
         try (ZipFile zipFile = new ZipFile(inFile)) {
@@ -633,12 +639,10 @@
 
     /**
      * Given a zip File input it will return its size. Only works for zip files whose uncompressed size is less than 4 GB, otherwise returns
-     * the size module 2^32, per gzip specifications
+     * the size modulo 2^32, per gzip specifications.
      *
      * @param myFile The zip file as input
      * @return zip file size as a long
-     *
-     * @throws IOException
      */
     public static long zipFileSize(File myFile) throws IOException {
         try (RandomAccessFile raf = new RandomAccessFile(myFile, "r")) {
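
The body elided by this hunk presumably reads the 4-byte little-endian ISIZE field that the gzip format stores at the end of the stream, which is why the result is only valid modulo 2^32. A standalone sketch of that read (an assumption about the elided code):

    import java.io.IOException;
    import java.io.RandomAccessFile;

    static long trailerSize(RandomAccessFile raf) throws IOException {
        raf.seek(raf.length() - 4);   // ISIZE: last 4 bytes, little-endian (file assumed >= 4 bytes)
        long b0 = raf.read();
        long b1 = raf.read();
        long b2 = raf.read();
        long b3 = raf.read();
        return (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
    }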
@@ -685,7 +689,7 @@
      * @param conf The configuration
      * @return True if it's resource aware; false otherwise
      */
-    public static boolean isRAS(Map<String, Object> conf) {
+    public static boolean isRas(Map<String, Object> conf) {
         if (conf.containsKey(DaemonConfig.STORM_SCHEDULER)) {
             if (conf.get(DaemonConfig.STORM_SCHEDULER).equals("org.apache.storm.scheduler.resource.ResourceAwareScheduler")) {
                 return true;
@@ -694,7 +698,7 @@
         return false;
     }
 
-    public static int getEstimatedWorkerCountForRASTopo(Map<String, Object> topoConf, StormTopology topology)
+    public static int getEstimatedWorkerCountForRasTopo(Map<String, Object> topoConf, StormTopology topology)
         throws InvalidTopologyException {
         Double defaultWorkerMaxHeap = ObjectReader.getDouble(topoConf.get(Config.WORKER_HEAP_MEMORY_MB), 768d);
         Double topologyWorkerMaxHeap = ObjectReader.getDouble(topoConf.get(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB), defaultWorkerMaxHeap);
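
From the two heap values read above, the elided estimate is presumably the topology's total requested on-heap memory divided by the per-worker heap cap, rounded up. A sketch under that assumption (totalOnHeapRequestMb is a hypothetical aggregate, not a variable in the patch):

    // ceil(total requested on-heap MB / per-worker heap cap MB)
    int estimatedWorkers = (int) Math.ceil(totalOnHeapRequestMb / topologyWorkerMaxHeap);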
@@ -753,15 +757,20 @@
         return System.getProperty("java.class.path");
     }
 
+
+    public URL getResourceFromClassloaderImpl(String name) {
+        return Thread.currentThread().getContextClassLoader().getResource(name);
+    }
+
     public void downloadResourcesAsSupervisorImpl(String key, String localFile,
                                                   ClientBlobStore cb) throws AuthorizationException, KeyNotFoundException, IOException {
-        final int MAX_RETRY_ATTEMPTS = 2;
-        final int ATTEMPTS_INTERVAL_TIME = 100;
-        for (int retryAttempts = 0; retryAttempts < MAX_RETRY_ATTEMPTS; retryAttempts++) {
+        final int maxRetryAttempts = 2;
+        final int attemptsIntervalTime = 100;
+        for (int retryAttempts = 0; retryAttempts < maxRetryAttempts; retryAttempts++) {
             if (downloadResourcesAsSupervisorAttempt(cb, key, localFile)) {
                 break;
             }
-            Utils.sleep(ATTEMPTS_INTERVAL_TIME);
+            Utils.sleep(attemptsIntervalTime);
         }
     }
 }
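
The loop above is a bounded retry with a fixed pause between attempts; the same shape as a generic, reusable sketch (illustrative only, not part of the patch):

    import java.util.function.BooleanSupplier;

    static boolean retry(int maxAttempts, long pauseMs, BooleanSupplier action)
            throws InterruptedException {
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            if (action.getAsBoolean()) {
                return true;           // success: stop retrying
            }
            Thread.sleep(pauseMs);     // pause before the next attempt
        }
        return false;                  // all attempts failed
    }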
diff --git a/storm-server/src/main/java/org/apache/storm/utils/StormCommonInstaller.java b/storm-server/src/main/java/org/apache/storm/utils/StormCommonInstaller.java
index 6f29552..e47ff10 100644
--- a/storm-server/src/main/java/org/apache/storm/utils/StormCommonInstaller.java
+++ b/storm-server/src/main/java/org/apache/storm/utils/StormCommonInstaller.java
@@ -21,17 +21,17 @@
  */
 public class StormCommonInstaller implements AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(StormCommonInstaller.class);
-    private StormCommon _oldInstance;
-    private StormCommon _curInstance;
+    private StormCommon oldInstance;
+    private StormCommon curInstance;
 
     public StormCommonInstaller(StormCommon instance) {
-        _oldInstance = StormCommon.setInstance(instance);
-        _curInstance = instance;
+        oldInstance = StormCommon.setInstance(instance);
+        curInstance = instance;
     }
 
     @Override
     public void close() throws Exception {
-        if (StormCommon.setInstance(_oldInstance) != _curInstance) {
+        if (StormCommon.setInstance(oldInstance) != curInstance) {
             throw new IllegalStateException(
                 "Instances of this resource must be closed in reverse order of opening.");
         }
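
A usage sketch, assuming a Mockito-style mock; the class swaps StormCommon's static delegate for the lifetime of the try block:

    // import static org.mockito.Mockito.mock;
    StormCommon mocked = mock(StormCommon.class);
    try (StormCommonInstaller installer = new StormCommonInstaller(mocked)) {
        // code under test now delegates to the mocked StormCommon
    }
    // the previous instance is restored on close(); closing out of order
    // fails with the IllegalStateException above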
diff --git a/storm-server/src/main/java/org/apache/storm/utils/ZookeeperServerCnxnFactory.java b/storm-server/src/main/java/org/apache/storm/utils/ZookeeperServerCnxnFactory.java
index eb7019e..f25bc3d 100644
--- a/storm-server/src/main/java/org/apache/storm/utils/ZookeeperServerCnxnFactory.java
+++ b/storm-server/src/main/java/org/apache/storm/utils/ZookeeperServerCnxnFactory.java
@@ -21,57 +21,58 @@
 
 public class ZookeeperServerCnxnFactory {
     private static final Logger LOG = LoggerFactory.getLogger(ZookeeperServerCnxnFactory.class);
-    int _port;
-    NIOServerCnxnFactory _factory;
+    int port;
+    NIOServerCnxnFactory factory;
 
     public ZookeeperServerCnxnFactory(int port, int maxClientCnxns) {
         //port range
         int max;
         if (port <= 0) {
-            _port = 2000;
+            this.port = 2000;
             max = 65535;
         } else {
-            _port = port;
+            this.port = port;
             max = port;
         }
 
         try {
-            _factory = new NIOServerCnxnFactory();
+            factory = new NIOServerCnxnFactory();
         } catch (IOException e) {
-            _port = 0;
-            _factory = null;
+            this.port = 0;
+            factory = null;
             e.printStackTrace();
             throw new RuntimeException(e.getMessage());
         }
 
         //look for available port
-        for (; _port <= max; _port++) {
+        for (; this.port <= max; this.port++) {
             try {
-                _factory.configure(new InetSocketAddress(_port), maxClientCnxns);
-                LOG.debug("Zookeeper server successfully binded at port " + _port);
+                factory.configure(new InetSocketAddress(this.port), maxClientCnxns);
+                LOG.debug("Zookeeper server successfully binded at port " + this.port);
                 break;
             } catch (BindException e1) {
+                // port already in use; try the next one
             } catch (IOException e2) {
-                _port = 0;
-                _factory = null;
+                this.port = 0;
+                factory = null;
                 e2.printStackTrace();
                 throw new RuntimeException(e2.getMessage());
             }
         }
 
-        if (_port > max) {
-            _port = 0;
-            _factory = null;
+        if (this.port > max) {
+            this.port = 0;
+            factory = null;
             LOG.error("Failed to find a port for Zookeeper");
             throw new RuntimeException("No port is available to launch an inprocess zookeeper.");
         }
     }
 
     public int port() {
-        return _port;
+        return port;
     }
 
     public NIOServerCnxnFactory factory() {
-        return _factory;
+        return factory;
     }
 }
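
Usage sketch of the scan semantics above: a non-positive port makes the constructor probe from 2000 upward, and port() reports what was actually bound:

    ZookeeperServerCnxnFactory cnxn = new ZookeeperServerCnxnFactory(0, 50);
    int boundPort = cnxn.port();                   // first free port in [2000, 65535]
    NIOServerCnxnFactory serverFactory = cnxn.factory();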
diff --git a/storm-server/src/main/java/org/apache/storm/zookeeper/AclEnforcement.java b/storm-server/src/main/java/org/apache/storm/zookeeper/AclEnforcement.java
index d7b6521..693b585 100644
--- a/storm-server/src/main/java/org/apache/storm/zookeeper/AclEnforcement.java
+++ b/storm-server/src/main/java/org/apache/storm/zookeeper/AclEnforcement.java
@@ -290,8 +290,8 @@
 
     private static boolean equivalent(List<ACL> a, List<ACL> b) {
         if (a.size() == b.size()) {
-            for (ACL aAcl : a) {
-                if (!b.contains(aAcl)) {
+            for (ACL acl : a) {
+                if (!b.contains(acl)) {
                     return false;
                 }
             }
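
The containment check above treats the ACL lists as sets; if duplicate entries can occur, a multiset comparison would be stricter. A sketch (not the project's code), assuming ACL's value-based equals/hashCode, which the contains() call above already relies on:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import org.apache.zookeeper.data.ACL;

    static boolean sameMultiset(List<ACL> a, List<ACL> b) {
        if (a.size() != b.size()) {
            return false;
        }
        Map<ACL, Integer> counts = new HashMap<>();
        for (ACL acl : a) {
            counts.merge(acl, 1, Integer::sum);
        }
        for (ACL acl : b) {
            if (counts.merge(acl, -1, Integer::sum) < 0) {
                return false;   // b has more copies of this ACL than a
            }
        }
        return true;
    }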
diff --git a/storm-server/src/main/java/org/apache/storm/zookeeper/Zookeeper.java b/storm-server/src/main/java/org/apache/storm/zookeeper/Zookeeper.java
index 9171038..e62c9e3 100644
--- a/storm-server/src/main/java/org/apache/storm/zookeeper/Zookeeper.java
+++ b/storm-server/src/main/java/org/apache/storm/zookeeper/Zookeeper.java
@@ -44,7 +44,7 @@
     // tests by subclassing.
     private static final Zookeeper INSTANCE = new Zookeeper();
     private static Logger LOG = LoggerFactory.getLogger(Zookeeper.class);
-    private static Zookeeper _instance = INSTANCE;
+    private static Zookeeper instance = INSTANCE;
 
     /**
      * Provide an instance of this class for delegates to use.  To mock out delegated methods, provide an instance of a subclass that
@@ -53,7 +53,7 @@
      * @param u a Zookeeper instance
      */
     public static void setInstance(Zookeeper u) {
-        _instance = u;
+        instance = u;
     }
 
     /**
@@ -61,12 +61,10 @@
      * longer desired.
      */
     public static void resetInstance() {
-        _instance = INSTANCE;
+        instance = INSTANCE;
     }
 
     public static NIOServerCnxnFactory mkInprocessZookeeper(String localdir, Integer port) throws Exception {
-        File localfile = new File(localdir);
-        ZooKeeperServer zk = new ZooKeeperServer(localfile, localfile, 2000);
         NIOServerCnxnFactory factory = null;
         int report = 2000;
         int limitPort = 65535;
@@ -87,6 +85,8 @@
             }
         }
         LOG.info("Starting inprocess zookeeper at port {} and dir {}", report, localdir);
+        File localfile = new File(localdir);
+        ZooKeeperServer zk = new ZooKeeperServer(localfile, localfile, 2000);
         factory.startup(zk);
         return factory;
     }
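
A usage sketch: passing a null port triggers the scan-from-2000 behavior above; getLocalPort() and shutdown() are standard ServerCnxnFactory methods:

    // stand up an in-process ZooKeeper for a test, then tear it down
    NIOServerCnxnFactory factory = Zookeeper.mkInprocessZookeeper("/tmp/zk-test", null);
    try {
        int port = factory.getLocalPort();   // where the server actually bound
        // ... connect a client to localhost:port ...
    } finally {
        factory.shutdown();
    }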
@@ -119,7 +119,7 @@
     public static ILeaderElector zkLeaderElector(Map<String, Object> conf, CuratorFramework zkClient, BlobStore blobStore,
                                                  final TopoCache tc, IStormClusterState clusterState, List<ACL> acls,
                                                  StormMetricsRegistry metricsRegistry) {
-        return _instance.zkLeaderElectorImpl(conf, zkClient, blobStore, tc, clusterState, acls, metricsRegistry);
+        return instance.zkLeaderElectorImpl(conf, zkClient, blobStore, tc, clusterState, acls, metricsRegistry);
     }
 
     protected ILeaderElector zkLeaderElectorImpl(Map<String, Object> conf, CuratorFramework zk, BlobStore blobStore,
diff --git a/storm-server/src/test/java/org/apache/storm/daemon/supervisor/BasicContainerTest.java b/storm-server/src/test/java/org/apache/storm/daemon/supervisor/BasicContainerTest.java
index 1c313be..e8bb376 100644
--- a/storm-server/src/test/java/org/apache/storm/daemon/supervisor/BasicContainerTest.java
+++ b/storm-server/src/test/java/org/apache/storm/daemon/supervisor/BasicContainerTest.java
@@ -107,10 +107,10 @@
             new HashMap<>(), ops, "profile");
         //null worker id means generate one...
 
-        assertNotNull(mc._workerId);
+        assertNotNull(mc.workerId);
         verify(ls).getApprovedWorkers();
         Map<String, Integer> expectedNewState = new HashMap<String, Integer>();
-        expectedNewState.put(mc._workerId, port);
+        expectedNewState.put(mc.workerId, port);
         verify(ls).setApprovedWorkers(expectedNewState);
     }
 
@@ -137,7 +137,7 @@
             "SUPERVISOR", supervisorPort, port, la, null, ls, null, new StormMetricsRegistry(),
             new HashMap<>(), ops, "profile");
 
-        assertEquals(workerId, mc._workerId);
+        assertEquals(workerId, mc.workerId);
     }
 
     @Test
@@ -189,7 +189,7 @@
 
         mc.cleanUp();
 
-        assertNull(mc._workerId);
+        assertNull(mc.workerId);
         verify(ls).getApprovedWorkers();
         Map<String, Integer> expectedNewState = new HashMap<String, Integer>();
         verify(ls).setApprovedWorkers(expectedNewState);
diff --git a/storm-server/src/test/java/org/apache/storm/localizer/AsyncLocalizerTest.java b/storm-server/src/test/java/org/apache/storm/localizer/AsyncLocalizerTest.java
index 8228702..550c808 100644
--- a/storm-server/src/test/java/org/apache/storm/localizer/AsyncLocalizerTest.java
+++ b/storm-server/src/test/java/org/apache/storm/localizer/AsyncLocalizerTest.java
@@ -18,6 +18,7 @@
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -66,6 +67,7 @@
 
 import static org.apache.storm.blobstore.BlobStoreAclHandler.WORLD_EVERYTHING;
 import static org.apache.storm.localizer.LocalizedResource.USERCACHE;
+import static org.apache.storm.localizer.LocallyCachedTopologyBlob.LOCAL_MODE_JAR_VERSION;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -249,6 +251,103 @@
         }
     }
 
+
+    @Test
+    public void testRequestDownloadTopologyBlobsLocalMode() throws Exception {
+        // tests download of topology blobs in local mode on a topology without a resources folder
+        final String topoId = "TOPO-12345";
+        final String user = "user";
+        LocalAssignment la = new LocalAssignment();
+        la.set_topology_id(topoId);
+        la.set_owner(user);
+        ExecutorInfo ei = new ExecutorInfo();
+        ei.set_task_start(1);
+        ei.set_task_end(1);
+        la.add_to_executors(ei);
+        final String topoName = "TOPO";
+        final int port = 8080;
+        final String simpleLocalName = "simple.txt";
+        final String simpleKey = "simple";
+
+        final String stormLocal = "/tmp/storm-local/";
+        final File userDir = new File(stormLocal, user);
+        final String stormRoot = stormLocal + topoId + "/";
+
+        final String localizerRoot = getTestLocalizerRoot();
+
+        final StormTopology st = new StormTopology();
+        st.set_spouts(new HashMap<>());
+        st.set_bolts(new HashMap<>());
+        st.set_state_spouts(new HashMap<>());
+
+        Map<String, Map<String, Object>> topoBlobMap = new HashMap<>();
+        Map<String, Object> simple = new HashMap<>();
+        simple.put("localname", simpleLocalName);
+        simple.put("uncompress", false);
+        topoBlobMap.put(simpleKey, simple);
+
+        Map<String, Object> conf = new HashMap<>();
+        conf.put(Config.STORM_LOCAL_DIR, stormLocal);
+        conf.put(Config.STORM_CLUSTER_MODE, "local");
+        AdvancedFSOps ops = mock(AdvancedFSOps.class);
+        ConfigUtils mockedCU = mock(ConfigUtils.class);
+        ServerUtils mockedSU = mock(ServerUtils.class);
+
+        Map<String, Object> topoConf = new HashMap<>(conf);
+        topoConf.put(Config.TOPOLOGY_BLOBSTORE_MAP, topoBlobMap);
+        topoConf.put(Config.TOPOLOGY_NAME, topoName);
+
+        List<LocalizedResource> localizedList = new ArrayList<>();
+        StormMetricsRegistry metricsRegistry = new StormMetricsRegistry();
+        LocalizedResource simpleLocal = new LocalizedResource(simpleKey, Paths.get(localizerRoot), false, ops, conf, user, metricsRegistry);
+        localizedList.add(simpleLocal);
+
+        AsyncLocalizer bl = spy(new AsyncLocalizer(conf, ops, localizerRoot, metricsRegistry));
+        ConfigUtils orig = ConfigUtils.setInstance(mockedCU);
+        ServerUtils origSU = ServerUtils.setInstance(mockedSU);
+
+        try {
+            when(mockedCU.supervisorStormDistRootImpl(conf, topoId)).thenReturn(stormRoot);
+            when(mockedCU.readSupervisorStormConfImpl(conf, topoId)).thenReturn(topoConf);
+            when(mockedCU.readSupervisorTopologyImpl(conf, topoId, ops)).thenReturn(st);
+
+            doReturn(mockblobstore).when(bl).getClientBlobStore();
+            doReturn(userDir).when(bl).getLocalUserFileCacheDir(user);
+            doReturn(localizedList).when(bl).getBlobs(any(List.class), any(), any());
+            doReturn(mock(OutputStream.class)).when(ops).getOutputStream(any());
+
+            ReadableBlobMeta blobMeta = new ReadableBlobMeta();
+            blobMeta.set_version(1);
+            doReturn(blobMeta).when(mockblobstore).getBlobMeta(any());
+            when(mockblobstore.getBlob(any())).thenAnswer(invocation -> new TestInputStreamWithMeta(LOCAL_MODE_JAR_VERSION));
+
+            Future<Void> f = bl.requestDownloadTopologyBlobs(la, port, null);
+            f.get(20, TimeUnit.SECONDS);
+
+            verify(bl).getLocalUserFileCacheDir(user);
+
+            verify(ops).fileExists(userDir);
+            verify(ops).forceMkdir(userDir);
+
+            verify(bl).getBlobs(any(List.class), any(), any());
+
+            Path extractionDir = Paths.get(stormRoot,
+                    LocallyCachedTopologyBlob.TopologyBlobType.TOPO_JAR.getTempExtractionDir(LOCAL_MODE_JAR_VERSION));
+
+            // make sure resources dir is created.
+            verify(ops).forceMkdir(extractionDir);
+
+        } finally {
+            try {
+                ConfigUtils.setInstance(orig);
+                ServerUtils.setInstance(origSU);
+                bl.close();
+            } catch (Throwable e) {
+                LOG.error("ERROR trying to close an object", e);
+            }
+        }
+    }
+
     @Before
     public void setUp() throws Exception {
         baseDir = new File(System.getProperty("java.io.tmpdir") + "/blob-store-localizer-test-" + UUID.randomUUID());
diff --git a/storm-server/src/test/java/org/apache/storm/scheduler/resource/TestResourceAwareScheduler.java b/storm-server/src/test/java/org/apache/storm/scheduler/resource/TestResourceAwareScheduler.java
index 17b9fa5..d8ea9ae 100644
--- a/storm-server/src/test/java/org/apache/storm/scheduler/resource/TestResourceAwareScheduler.java
+++ b/storm-server/src/test/java/org/apache/storm/scheduler/resource/TestResourceAwareScheduler.java
@@ -103,9 +103,9 @@
         TopologyDetails topology2 = genTopology("topology2", config, 1, 0, 2, 0, 0, 0, "user");
         Topologies topologies = new Topologies(topology1, topology2);
         Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, config);
-        Map<String, RAS_Node> nodes = RAS_Nodes.getAllNodesFrom(cluster);
+        Map<String, RasNode> nodes = RasNodes.getAllNodesFrom(cluster);
         assertEquals(5, nodes.size());
-        RAS_Node node = nodes.get("r000s000");
+        RasNode node = nodes.get("r000s000");
 
         assertEquals("r000s000", node.getId());
         assertTrue(node.isAlive());
@@ -904,7 +904,7 @@
         scheduler.prepare(config);
         scheduler.schedule(topologies, cluster);
 
-        Map<String, RAS_Node> nodes = RAS_Nodes.getAllNodesFrom(cluster);
+        Map<String, RasNode> nodes = RasNodes.getAllNodesFrom(cluster);
 
         for (SchedulerAssignment assignment : cluster.getAssignments().values()) {
             for (Entry<WorkerSlot, WorkerResources> entry : new HashMap<>(assignment.getScheduledResources()).entrySet()) {
diff --git a/storm-server/src/test/java/org/apache/storm/scheduler/resource/normalization/NormalizedResourcesExtension.java b/storm-server/src/test/java/org/apache/storm/scheduler/resource/normalization/NormalizedResourcesExtension.java
new file mode 100644
index 0000000..b3c8091
--- /dev/null
+++ b/storm-server/src/test/java/org/apache/storm/scheduler/resource/normalization/NormalizedResourcesExtension.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.storm.scheduler.resource.normalization;
+
+import org.junit.jupiter.api.extension.AfterEachCallback;
+import org.junit.jupiter.api.extension.BeforeEachCallback;
+import org.junit.jupiter.api.extension.ExtensionContext;
+
+public class NormalizedResourcesExtension implements BeforeEachCallback, AfterEachCallback {
+    @Override
+    public void beforeEach(ExtensionContext context) {
+        NormalizedResources.resetResourceNames();
+    }
+
+    @Override
+    public void afterEach(ExtensionContext context) {
+        NormalizedResources.resetResourceNames();
+    }
+}
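
Registration sketch, mirroring how TestDefaultResourceAwareStrategy wires the extension in below:

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.extension.ExtendWith;

    @ExtendWith(NormalizedResourcesExtension.class)
    class SomeResourceAwareTest {
        @Test
        void schedules() {
            // NormalizedResources' static resource-name state is reset
            // before and after this test by the extension
        }
    }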
diff --git a/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestConstraintSolverStrategy.java b/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestConstraintSolverStrategy.java
index 4ae5c88..f43868d 100644
--- a/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestConstraintSolverStrategy.java
+++ b/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestConstraintSolverStrategy.java
@@ -37,7 +37,9 @@
 import org.apache.storm.utils.Time;
 import org.apache.storm.utils.Utils;
 import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -202,6 +204,50 @@
         }
     }
 
+    /*
+     * Test scheduling a large number of executors and constraints.
+     *
+     * The cluster has sufficient resources for scheduling to succeed, but scheduling can still fail with a StackOverflowError.
+     */
+    @ParameterizedTest
+    @ValueSource(ints = {1, 20})
+    public void testScheduleLargeExecutorConstraintCount(int parallelismMultiplier) {
+        // Add 1 topology with a large number of executors and constraints. Too many can cause a java.lang.StackOverflowError.
+        Config config = createCSSClusterConfig(10, 10, 0, null);
+        config.put(Config.TOPOLOGY_RAS_CONSTRAINT_MAX_STATE_SEARCH, 50000);
+
+        List<List<String>> constraints = new LinkedList<>();
+        addContraints("spout-0", "spout-0", constraints);
+        addContraints("bolt-1", "bolt-1", constraints);
+        addContraints("spout-0", "bolt-0", constraints);
+        addContraints("bolt-2", "spout-0", constraints);
+        addContraints("bolt-1", "bolt-2", constraints);
+        addContraints("bolt-1", "bolt-0", constraints);
+        addContraints("bolt-1", "spout-0", constraints);
+
+        config.put(Config.TOPOLOGY_RAS_CONSTRAINTS, constraints);
+        TopologyDetails topo = genTopology("testTopo-" + parallelismMultiplier, config, 10, 10, 30 * parallelismMultiplier, 30 * parallelismMultiplier, 31414, 0, "user");
+        Topologies topologies = new Topologies(topo);
+
+        Map<String, SupervisorDetails> supMap = genSupervisors(30 * parallelismMultiplier, 30, 3500, 35000);
+        Cluster cluster = makeCluster(topologies, supMap);
+
+        ResourceAwareScheduler scheduler = new ResourceAwareScheduler();
+        scheduler.prepare(config);
+        scheduler.schedule(topologies, cluster);
+
+        boolean scheduleSuccess = isStatusSuccess(cluster.getStatus(topo.getId()));
+
+        if (parallelismMultiplier == 1) {
+            Assert.assertTrue(scheduleSuccess);
+        } else if (parallelismMultiplier == 20) {
+            // With the default JVM stack size, scheduling currently fails due to StackOverflowError.
+            // For now just log the result of the test. Change to an assert when the StackOverflow issue is fixed.
+            LOG.info("testScheduleLargeExecutorConstraintCount scheduling {} with {}x executor multiplier", scheduleSuccess ? "succeeds" : "fails",
+                    parallelismMultiplier);
+        }
+    }
+
     @Test
     public void testIntegrationWithRAS() {
         List<List<String>> constraints = new LinkedList<>();
diff --git a/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestDefaultResourceAwareStrategy.java b/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestDefaultResourceAwareStrategy.java
index d72d362..31d8ecb 100644
--- a/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestDefaultResourceAwareStrategy.java
+++ b/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestDefaultResourceAwareStrategy.java
@@ -18,8 +18,9 @@
 
 package org.apache.storm.scheduler.resource.strategies.scheduling;
 
+import org.apache.storm.daemon.nimbus.TopologyResources;
 import org.apache.storm.scheduler.IScheduler;
-import org.apache.storm.scheduler.resource.normalization.NormalizedResourcesRule;
+import org.apache.storm.scheduler.resource.normalization.NormalizedResourcesExtension;
 import java.util.Collections;
 import org.apache.storm.Config;
 import org.apache.storm.generated.StormTopology;
@@ -34,7 +35,7 @@
 import org.apache.storm.scheduler.Topologies;
 import org.apache.storm.scheduler.TopologyDetails;
 import org.apache.storm.scheduler.WorkerSlot;
-import org.apache.storm.scheduler.resource.RAS_Node;
+import org.apache.storm.scheduler.resource.RasNode;
 import org.apache.storm.scheduler.resource.ResourceAwareScheduler;
 import org.apache.storm.scheduler.resource.SchedulingResult;
 import org.apache.storm.scheduler.resource.strategies.scheduling.BaseResourceAwareStrategy.ObjectResources;
@@ -42,20 +43,24 @@
 import org.apache.storm.topology.SharedOffHeapWithinWorker;
 import org.apache.storm.topology.SharedOnHeap;
 import org.apache.storm.topology.TopologyBuilder;
-import org.junit.After;
 import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.EnumSource;
+import org.junit.jupiter.params.provider.ValueSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.hamcrest.Matchers.closeTo;
+import static org.hamcrest.Matchers.is;
 import static org.junit.Assert.*;
 import static org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler.*;
 
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -67,11 +72,17 @@
 import org.apache.storm.metric.StormMetricsRegistry;
 import org.apache.storm.scheduler.resource.normalization.ResourceMetrics;
 
+@ExtendWith({NormalizedResourcesExtension.class})
 public class TestDefaultResourceAwareStrategy {
     private static final Logger LOG = LoggerFactory.getLogger(TestDefaultResourceAwareStrategy.class);
 
     private static final int CURRENT_TIME = 1450418597;
     private static IScheduler scheduler = null;
+    private enum SharedMemoryType {
+        SHARED_OFF_HEAP_NODE,
+        SHARED_OFF_HEAP_WORKER,
+        SHARED_ON_HEAP_WORKER
+    };
 
     private static class TestDNSToSwitchMapping implements DNSToSwitchMapping {
         private final Map<String, String> result;
@@ -93,10 +104,7 @@
         }
     };
 
-    @Rule
-    public NormalizedResourcesRule nrRule = new NormalizedResourcesRule();
-
-    @After
+    @AfterEach
     public void cleanup() {
         if (scheduler != null) {
             scheduler.cleanup();
@@ -104,39 +112,44 @@
         }
     }
 
-    /**
-     * test if the scheduling logic for the DefaultResourceAwareStrategy is correct
+    /*
+     * Test assigned memory with each shared memory type and oneExecutorPerWorker enabled.
      */
-    @Test
-    public void testDefaultResourceAwareStrategySharedMemory() {
-        int spoutParallelism = 2;
-        int boltParallelism = 2;
-        int numBolts = 3;
+    @ParameterizedTest
+    @EnumSource(SharedMemoryType.class)
+    public void testMultipleSharedMemoryWithOneExecutorPerWorker(SharedMemoryType memoryType) {
+        int spoutParallelism = 4;
         double cpuPercent = 10;
         double memoryOnHeap = 10;
         double memoryOffHeap = 10;
-        double sharedOnHeap = 500;
-        double sharedOffHeapNode = 700;
-        double sharedOffHeapWorker = 500;
+        double sharedOnHeapWithinWorker = 450;
+        double sharedOffHeapWithinNode = 600;
+        double sharedOffHeapWithinWorker = 400;
+
         TopologyBuilder builder = new TopologyBuilder();
-        builder.setSpout("spout", new TestSpout(),
-                spoutParallelism);
-        builder.setBolt("bolt-1", new TestBolt(),
-                boltParallelism).addSharedMemory(new SharedOffHeapWithinWorker(sharedOffHeapWorker, "bolt-1 shared off heap worker")).shuffleGrouping("spout");
-        builder.setBolt("bolt-2", new TestBolt(),
-                boltParallelism).addSharedMemory(new SharedOffHeapWithinNode(sharedOffHeapNode, "bolt-2 shared node")).shuffleGrouping("bolt-1");
-        builder.setBolt("bolt-3", new TestBolt(),
-                boltParallelism).addSharedMemory(new SharedOnHeap(sharedOnHeap, "bolt-3 shared worker")).shuffleGrouping("bolt-2");
-
+        switch (memoryType) {
+            case SHARED_OFF_HEAP_NODE:
+                builder.setSpout("spout", new TestSpout(), spoutParallelism)
+                        .addSharedMemory(new SharedOffHeapWithinNode(sharedOffHeapWithinNode, "spout shared off heap within node"));
+                break;
+            case SHARED_OFF_HEAP_WORKER:
+                builder.setSpout("spout", new TestSpout(), spoutParallelism)
+                        .addSharedMemory(new SharedOffHeapWithinWorker(sharedOffHeapWithinWorker, "spout shared off heap within worker"));
+                break;
+            case SHARED_ON_HEAP_WORKER:
+                builder.setSpout("spout", new TestSpout(), spoutParallelism)
+                        .addSharedMemory(new SharedOnHeap(sharedOnHeapWithinWorker, "spout shared on heap within worker"));
+                break;
+        }
         StormTopology stormToplogy = builder.createTopology();
-
         INimbus iNimbus = new INimbusTest();
-        Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 2000);
+        Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 1000);
         Config conf = createClusterConfig(cpuPercent, memoryOnHeap, memoryOffHeap, null);
-        
+
         conf.put(Config.TOPOLOGY_PRIORITY, 0);
         conf.put(Config.TOPOLOGY_NAME, "testTopology");
         conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
+        conf.put(Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER, true);
         TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormToplogy, 0,
                 genExecsAndComps(stormToplogy), CURRENT_TIME, "user");
 
@@ -144,10 +157,176 @@
         Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
 
         scheduler = new ResourceAwareScheduler();
-
         scheduler.prepare(conf);
         scheduler.schedule(topologies, cluster);
-        
+
+        TopologyResources topologyResources = cluster.getTopologyResourcesMap().get(topo.getId());
+        SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
+        long numNodes = assignment.getSlotToExecutors().keySet().stream().map(ws -> ws.getNodeId()).distinct().count();
+
+        switch (memoryType) {
+            case SHARED_OFF_HEAP_NODE:
+                // 4 workers on a single node. OffHeapNode memory is shared
+                assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
+                assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap + sharedOffHeapWithinNode, 0.01));
+                assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(0, 0.01));
+                assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(sharedOffHeapWithinNode, 0.01));
+                assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
+                assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
+                assertThat(numNodes, is(1L));
+                assertThat(cluster.getAssignedNumWorkers(topo), is(spoutParallelism));
+                break;
+            case SHARED_OFF_HEAP_WORKER:
+                // 4 workers on 2 nodes. OffHeapWorker memory not shared -- consumed 4x, once for each worker
+                assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
+                assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(spoutParallelism * (memoryOffHeap + sharedOffHeapWithinWorker), 0.01));
+                assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(0, 0.01));
+                assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(spoutParallelism * sharedOffHeapWithinWorker, 0.01));
+                assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
+                assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
+                assertThat(numNodes, is(2L));
+                assertThat(cluster.getAssignedNumWorkers(topo), is(spoutParallelism));
+                break;
+            case SHARED_ON_HEAP_WORKER:
+                // 4 workers on 2 nodes. onHeap memory not shared -- consumed 4x, once for each worker
+                assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(spoutParallelism * (memoryOnHeap + sharedOnHeapWithinWorker), 0.01));
+                assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
+                assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(spoutParallelism * sharedOnHeapWithinWorker, 0.01));
+                assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(0, 0.01));
+                assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
+                assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
+                assertThat(numNodes, is(2L));
+                assertThat(cluster.getAssignedNumWorkers(topo), is(spoutParallelism));
+                break;
+        }
+    }
+
+    /*
+     * Test that scheduling does not produce negative resources.
+     */
+    @Test
+    public void testSchedulingNegativeResources() {
+        int spoutParallelism = 2;
+        int boltParallelism = 2;
+        double cpuPercent = 10;
+        double memoryOnHeap = 10;
+        double memoryOffHeap = 10;
+        double sharedOnHeapWithinWorker = 400;
+        double sharedOffHeapWithinNode = 700;
+        double sharedOffHeapWithinWorker = 500;
+
+        Config conf = createClusterConfig(cpuPercent, memoryOnHeap, memoryOffHeap, null);
+        TopologyDetails[] topo = new TopologyDetails[2];
+
+        // 1st topology
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("spout", new TestSpout(),
+                spoutParallelism);
+        builder.setBolt("bolt-1", new TestBolt(),
+                boltParallelism).addSharedMemory(new SharedOffHeapWithinWorker(sharedOffHeapWithinWorker, "bolt-1 shared off heap within worker")).shuffleGrouping("spout");
+        builder.setBolt("bolt-2", new TestBolt(),
+                boltParallelism).addSharedMemory(new SharedOffHeapWithinNode(sharedOffHeapWithinNode, "bolt-2 shared off heap within node")).shuffleGrouping("bolt-1");
+        builder.setBolt("bolt-3", new TestBolt(),
+                boltParallelism).addSharedMemory(new SharedOnHeap(sharedOnHeapWithinWorker, "bolt-3 shared on heap within worker")).shuffleGrouping("bolt-2");
+        StormTopology stormToplogy = builder.createTopology();
+
+        conf.put(Config.TOPOLOGY_PRIORITY, 1);
+        conf.put(Config.TOPOLOGY_NAME, "testTopology-0");
+        conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
+        topo[0] = new TopologyDetails("testTopology-id-0", conf, stormToplogy, 0,
+                genExecsAndComps(stormToplogy), CURRENT_TIME, "user");
+
+        // 2nd topology
+        builder = new TopologyBuilder();
+        builder.setSpout("spout", new TestSpout(),
+                spoutParallelism).addSharedMemory(new SharedOffHeapWithinNode(sharedOffHeapWithinNode, "spout shared off heap within node"));
+        stormToplogy = builder.createTopology();
+
+        conf.put(Config.TOPOLOGY_PRIORITY, 0);
+        conf.put(Config.TOPOLOGY_NAME, "testTopology-1");
+        topo[1] = new TopologyDetails("testTopology-id-1", conf, stormToplogy, 0,
+                genExecsAndComps(stormToplogy), CURRENT_TIME, "user");
+
+        Map<String, SupervisorDetails> supMap = genSupervisors(1, 4, 500, 2000);
+        Topologies topologies = new Topologies(topo[0]);
+        Cluster cluster = new Cluster(new INimbusTest(), new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
+
+        // schedule 1st topology
+        scheduler = new ResourceAwareScheduler();
+        scheduler.prepare(conf);
+        scheduler.schedule(topologies, cluster);
+        assertTopologiesFullyScheduled(cluster, topo[0].getName());
+
+        // attempt scheduling both topologies.
+        // this previously triggered a negative resource event, as the second topology was incorrectly scheduled with the first still in place
+        // the first topology should get evicted so the higher priority (lower value) second topology can schedule successfully
+        topologies = new Topologies(topo[0], topo[1]);
+        cluster = new Cluster(cluster, topologies);
+        scheduler.schedule(topologies, cluster);
+        assertTopologiesNotScheduled(cluster, topo[0].getName());
+        assertTopologiesFullyScheduled(cluster, topo[1].getName());
+
+        // check negative resource count
+        assertThat(cluster.getResourceMetrics().getNegativeResourceEventsMeter().getCount(), is(0L));
+    }
+
+    /**
+     * Test that shared memory is scheduled correctly with and without oneExecutorPerWorker enabled.
+     */
+    @ParameterizedTest
+    @ValueSource(booleans = {true, false})
+    public void testDefaultResourceAwareStrategySharedMemory(boolean oneExecutorPerWorker) {
+        int spoutParallelism = 2;
+        int boltParallelism = 2;
+        int numBolts = 3;
+        double cpuPercent = 10;
+        double memoryOnHeap = 10;
+        double memoryOffHeap = 10;
+        double sharedOnHeapWithinWorker = 400;
+        double sharedOffHeapWithinNode = 700;
+        double sharedOffHeapWithinWorker = 600;
+
+        TopologyBuilder builder = new TopologyBuilder();
+        builder.setSpout("spout", new TestSpout(),
+                spoutParallelism);
+        builder.setBolt("bolt-1", new TestBolt(),
+                boltParallelism).addSharedMemory(new SharedOffHeapWithinWorker(sharedOffHeapWithinWorker, "bolt-1 shared off heap within worker")).shuffleGrouping("spout");
+        builder.setBolt("bolt-2", new TestBolt(),
+                boltParallelism).addSharedMemory(new SharedOffHeapWithinNode(sharedOffHeapWithinNode, "bolt-2 shared off heap within node")).shuffleGrouping("bolt-1");
+        builder.setBolt("bolt-3", new TestBolt(),
+                boltParallelism).addSharedMemory(new SharedOnHeap(sharedOnHeapWithinWorker, "bolt-3 shared on heap within worker")).shuffleGrouping("bolt-2");
+
+        StormTopology stormToplogy = builder.createTopology();
+
+        INimbus iNimbus = new INimbusTest();
+        Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 2000);
+        Config conf = createClusterConfig(cpuPercent, memoryOnHeap, memoryOffHeap, null);
+
+        conf.put(Config.TOPOLOGY_PRIORITY, 0);
+        conf.put(Config.TOPOLOGY_NAME, "testTopology");
+        conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
+        conf.put(Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER, oneExecutorPerWorker);
+        TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormToplogy, 0,
+                genExecsAndComps(stormToplogy), CURRENT_TIME, "user");
+
+        Topologies topologies = new Topologies(topo);
+        Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
+
+        scheduler = new ResourceAwareScheduler();
+        scheduler.prepare(conf);
+        scheduler.schedule(topologies, cluster);
+
+        // one worker per executor scheduling
+        // [3,3] [7,7], [0,0] [2,2] [6,6] [1,1] [5,5] [4,4] sorted executor ordering
+        // spout  [0,0] [1,1]
+        // bolt-1 [2,2] [3,3]
+        // bolt-2 [6,6] [7,7]
+        // bolt-3 [4,4] [5,5]
+        //
+        // expect 8 workers over 2 nodes
+        // node r000s000 workers: bolt-1 bolt-2 spout bolt-1 (no memory sharing)
+        // node r000s001 workers: bolt-2 spout bolt-3 bolt-3 (no memory sharing)
+
         for (Entry<String, SupervisorResources> entry: cluster.getSupervisorsResourcesMap().entrySet()) {
             String supervisorId = entry.getKey();
             SupervisorResources resources = entry.getValue();
@@ -155,28 +334,60 @@
             assertTrue(supervisorId, resources.getTotalMem() >= resources.getUsedMem());
         }
 
-        // Everything should fit in a single slot
-        int totalNumberOfTasks = (spoutParallelism + (boltParallelism * numBolts));
-        double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
-        double totalExpectedOnHeap = (totalNumberOfTasks * memoryOnHeap) + sharedOnHeap;
-        double totalExpectedWorkerOffHeap = (totalNumberOfTasks * memoryOffHeap) + sharedOffHeapWorker;
-        
-        SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
-        assertEquals(1, assignment.getSlots().size());
-        WorkerSlot ws = assignment.getSlots().iterator().next();
-        String nodeId = ws.getNodeId();
-        assertEquals(1, assignment.getNodeIdToTotalSharedOffHeapMemory().size());
-        assertEquals(sharedOffHeapNode, assignment.getNodeIdToTotalSharedOffHeapMemory().get(nodeId), 0.01);
-        assertEquals(1, assignment.getScheduledResources().size());
-        WorkerResources resources = assignment.getScheduledResources().get(ws);
-        assertEquals(totalExpectedCPU, resources.get_cpu(), 0.01);
-        assertEquals(totalExpectedOnHeap, resources.get_mem_on_heap(), 0.01);
-        assertEquals(totalExpectedWorkerOffHeap, resources.get_mem_off_heap(), 0.01);
-        assertEquals(sharedOnHeap, resources.get_shared_mem_on_heap(), 0.01);
-        assertEquals(sharedOffHeapWorker, resources.get_shared_mem_off_heap(), 0.01);
+        if (!oneExecutorPerWorker) {
+            // Everything should fit in a single slot
+            int totalNumberOfTasks = (spoutParallelism + (boltParallelism * numBolts));
+            double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
+            double totalExpectedOnHeap = (totalNumberOfTasks * memoryOnHeap) + sharedOnHeapWithinWorker;
+            double totalExpectedWorkerOffHeap = (totalNumberOfTasks * memoryOffHeap) + sharedOffHeapWithinWorker;
+
+            SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
+            assertThat(assignment.getSlots().size(), is(1));
+            WorkerSlot ws = assignment.getSlots().iterator().next();
+            String nodeId = ws.getNodeId();
+            assertThat(assignment.getNodeIdToTotalSharedOffHeapNodeMemory().size(), is(1));
+            assertThat(assignment.getNodeIdToTotalSharedOffHeapNodeMemory().get(nodeId), closeTo(sharedOffHeapWithinNode, 0.01));
+            assertThat(assignment.getScheduledResources().size(), is(1));
+            WorkerResources resources = assignment.getScheduledResources().get(ws);
+            assertThat(resources.get_cpu(), closeTo(totalExpectedCPU, 0.01));
+            assertThat(resources.get_mem_on_heap(), closeTo(totalExpectedOnHeap, 0.01));
+            assertThat(resources.get_mem_off_heap(), closeTo(totalExpectedWorkerOffHeap, 0.01));
+            assertThat(resources.get_shared_mem_on_heap(), closeTo(sharedOnHeapWithinWorker, 0.01));
+            assertThat(resources.get_shared_mem_off_heap(), closeTo(sharedOffHeapWithinWorker, 0.01));
+        } else {
+            // one worker per executor
+            int totalNumberOfTasks = (spoutParallelism + (boltParallelism * numBolts));
+            TopologyResources topologyResources = cluster.getTopologyResourcesMap().get(topo.getId());
+
+            // get expected mem on topology rather than per executor
+            double expectedMemOnHeap = (totalNumberOfTasks * memoryOnHeap) + 2 * sharedOnHeapWithinWorker;
+            double expectedMemOffHeap = (totalNumberOfTasks * memoryOffHeap) + 2 * sharedOffHeapWithinWorker + 2 * sharedOffHeapWithinNode;
+            double expectedMemSharedOnHeap = 2 * sharedOnHeapWithinWorker;
+            double expectedMemSharedOffHeap = 2 * sharedOffHeapWithinWorker + 2 * sharedOffHeapWithinNode;
+            double expectedMemNonSharedOnHeap = totalNumberOfTasks * memoryOnHeap;
+            double expectedMemNonSharedOffHeap = totalNumberOfTasks * memoryOffHeap;
+            assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(expectedMemOnHeap, 0.01));
+            assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(expectedMemOffHeap, 0.01));
+            assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(expectedMemSharedOnHeap, 0.01));
+            assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(expectedMemSharedOffHeap, 0.01));
+            assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(expectedMemNonSharedOnHeap, 0.01));
+            assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(expectedMemNonSharedOffHeap, 0.01));
+
+            double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
+            assertThat(topologyResources.getAssignedCpu(), closeTo(totalExpectedCPU, 0.01));
+
+            // expect 8 workers
+            SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
+            int numAssignedWorkers = cluster.getAssignedNumWorkers(topo);
+            assertThat(numAssignedWorkers, is(8));
+            assertThat(assignment.getSlots().size(), is(8));
+
+            // expect 2 nodes
+            long numNodes = assignment.getSlotToExecutors().keySet().stream().map(ws -> ws.getNodeId()).distinct().count();
+            assertThat(numNodes, is(2L));
+        }
     }
     
-    
     /**
      * test if the scheduling logic for the DefaultResourceAwareStrategy is correct
      */
@@ -332,7 +543,7 @@
         List<String> nodeHostnames = rackToNodes.get("rack-1");
         for (int i = 0; i< topo2.getExecutors().size()/2; i++) {
             String nodeHostname = nodeHostnames.get(i % nodeHostnames.size());
-            RAS_Node node = rs.hostnameToNodes(nodeHostname).get(0);
+            RasNode node = rs.hostnameToNodes(nodeHostname).get(0);
             WorkerSlot targetSlot = node.getFreeSlots().iterator().next();
             ExecutorDetails targetExec = executorIterator.next();
             // to keep track of free slots
@@ -457,7 +668,7 @@
         List<String> nodeHostnames = rackToNodes.get("rack-1");
         for (int i = 0; i< topo2.getExecutors().size()/2; i++) {
             String nodeHostname = nodeHostnames.get(i % nodeHostnames.size());
-            RAS_Node node = rs.hostnameToNodes(nodeHostname).get(0);
+            RasNode node = rs.hostnameToNodes(nodeHostname).get(0);
             WorkerSlot targetSlot = node.getFreeSlots().iterator().next();
             ExecutorDetails targetExec = executorIterator.next();
             // to keep track of free slots
diff --git a/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestGenericResourceAwareStrategy.java b/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestGenericResourceAwareStrategy.java
index 037d226..033b3cf 100644
--- a/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestGenericResourceAwareStrategy.java
+++ b/storm-server/src/test/java/org/apache/storm/scheduler/resource/strategies/scheduling/TestGenericResourceAwareStrategy.java
@@ -140,7 +140,7 @@
         
         SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
         Set<WorkerSlot> slots = assignment.getSlots();
-        Map<String, Double> nodeToTotalShared = assignment.getNodeIdToTotalSharedOffHeapMemory();
+        Map<String, Double> nodeToTotalShared = assignment.getNodeIdToTotalSharedOffHeapNodeMemory();
         LOG.info("NODE TO SHARED OFF HEAP {}", nodeToTotalShared);
         Map<WorkerSlot, WorkerResources> scheduledResources = assignment.getScheduledResources();
         assertEquals(2, slots.size());
diff --git a/storm-shaded-deps/pom.xml b/storm-shaded-deps/pom.xml
index 16c4e37..499d520 100644
--- a/storm-shaded-deps/pom.xml
+++ b/storm-shaded-deps/pom.xml
@@ -23,7 +23,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>..</relativePath>
     </parent>
 
diff --git a/storm-submit-tools/pom.xml b/storm-submit-tools/pom.xml
index 3ff57df..21db63a 100644
--- a/storm-submit-tools/pom.xml
+++ b/storm-submit-tools/pom.xml
@@ -17,7 +17,7 @@
     <parent>
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>../pom.xml</relativePath>
     </parent>
     <modelVersion>4.0.0</modelVersion>
@@ -114,9 +114,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
diff --git a/storm-webapp/pom.xml b/storm-webapp/pom.xml
index 8d42589..a886fee 100644
--- a/storm-webapp/pom.xml
+++ b/storm-webapp/pom.xml
@@ -21,7 +21,7 @@
 
         <artifactId>storm</artifactId>
         <groupId>org.apache.storm</groupId>
-        <version>2.0.1-SNAPSHOT</version>
+        <version>2.2.0-SNAPSHOT</version>
         <relativePath>..</relativePath>
     </parent>
 
@@ -30,17 +30,12 @@
     <name>Storm Webapp</name>
     <description>Webapp Servers for Apache Storm</description>
 
-    <properties>
-        <jersey.version>2.27</jersey.version>
-        <grizzly.version>2.4.3</grizzly.version>
-        <jersey-grizzly2.version>1.19.4</jersey-grizzly2.version>
-    </properties>
     <dependencies>
         <!-- storm-webapp is based on storm-core -->
         <dependency>
             <groupId>org.apache.storm</groupId>
             <artifactId>storm-core</artifactId>
-            <version>2.0.1-SNAPSHOT</version>
+            <version>2.2.0-SNAPSHOT</version>
             <scope>${provided.scope}</scope>
         </dependency>
         <dependency>
@@ -101,14 +96,6 @@
                     <groupId>ch.qos.logback</groupId>
                     <artifactId>logback-access</artifactId>
                 </exclusion>
-                <exclusion>
-                    <groupId>org.glassfish.jersey.ext</groupId>
-                    <artifactId>jersey-metainf-services</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.glassfish.jersey.ext</groupId>
-                    <artifactId>jersey-bean-validation</artifactId>
-                </exclusion>
             </exclusions>
         </dependency>
         <dependency>
@@ -131,152 +118,36 @@
             <artifactId>dropwizard-testing</artifactId>
             <version>${dropwizard.version}</version>
             <scope>test</scope>
-            <exclusions>
-                <exclusion>
-                    <groupId>com.sun.jersey</groupId>
-                    <artifactId>jersey-server</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.glassfish.jersey.core</groupId>
-                    <artifactId>jersey-servlet</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>com.sun.jersey</groupId>
-                    <artifactId>jersey-core</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.glassfish.jersey.ext</groupId>
-                    <artifactId>jersey-metainf-services</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.glassfish.jersey.ext</groupId>
-                    <artifactId>jersey-bean-validation</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.glassfish.jersey.test-framework.providers</groupId>
-                    <artifactId>jersey-test-framework-provider-inmemory</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.glassfish.jersey.test-framework.providers</groupId>
-                    <artifactId>ersey-test-framework-provider-inmemory</artifactId>
-                </exclusion>
-            </exclusions>
-        </dependency>
-        <!-- Java 9+ compatibility, ensure Java EE classes are on classpath when using jersey (through Dropwizard) -->
-        <dependency>
-            <groupId>javax.activation</groupId>
-            <artifactId>activation</artifactId>
-        </dependency>
-        <dependency>
-            <groupId>javax.xml.bind</groupId>
-            <artifactId>jaxb-api</artifactId>
         </dependency>
 
         <!-- UI -->
         <dependency>
-            <groupId>org.glassfish.jersey</groupId>
-            <artifactId>jersey-bom</artifactId>
-            <version>${jersey.version}</version>
-            <type>pom</type>
-            <scope>compile</scope>
-        </dependency>
-        <dependency>
             <groupId>org.glassfish.jersey.containers</groupId>
             <artifactId>jersey-container-grizzly2-http</artifactId>
-            <version>${jersey.version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>com.sun.jersey</groupId>
-                    <artifactId>jersey-server</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.glassfish.jersey.core</groupId>
-                    <artifactId>jersey-servlet</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>com.sun.jersey</groupId>
-                    <artifactId>jersey-core</artifactId>
-                </exclusion>
-            </exclusions>
         </dependency>
         <dependency>
             <groupId>org.glassfish.jersey.inject</groupId>
             <artifactId>jersey-hk2</artifactId>
-            <version>${jersey.version}</version>
         </dependency>
         <dependency>
             <groupId>org.glassfish.jersey.containers</groupId>
             <artifactId>jersey-container-grizzly2-servlet</artifactId>
-            <version>${jersey.version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>com.sun.jersey</groupId>
-                    <artifactId>jersey-server</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.glassfish.jersey.core</groupId>
-                    <artifactId>jersey-servlet</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>com.sun.jersey</groupId>
-                    <artifactId>jersey-core</artifactId>
-                </exclusion>
-            </exclusions>
+        </dependency>
+        <!-- Extra Java 11 jars for Jersey. Jersey's dependency tree only includes these on Java 11,
+            so declare them explicitly to ensure that artifacts built on Java 8 also run on Java 11. -->
+        <dependency>
+            <groupId>com.sun.activation</groupId>
+            <artifactId>jakarta.activation</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.sun.jersey</groupId>
-            <artifactId>jersey-grizzly2</artifactId>
-            <version>${jersey-grizzly2.version}</version>
-            <exclusions>
-                <exclusion>
-                    <groupId>com.sun.jersey</groupId>
-                    <artifactId>jersey-server</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.glassfish.jersey.core</groupId>
-                    <artifactId>jersey-servlet</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>com.sun.jersey</groupId>
-                    <artifactId>jersey-core</artifactId>
-                </exclusion>
-            </exclusions>
+            <groupId>jakarta.activation</groupId>
+            <artifactId>jakarta.activation-api</artifactId>
         </dependency>
         <dependency>
-            <groupId>com.sun.jersey</groupId>
-            <artifactId>jersey-grizzly2-servlet</artifactId>
-            <version>${jersey-grizzly2.version}</version>
-            <scope>test</scope>
-            <exclusions>
-                <exclusion>
-                    <groupId>com.sun.jersey</groupId>
-                    <artifactId>jersey-server</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>org.glassfish.jersey.core</groupId>
-                    <artifactId>jersey-servlet</artifactId>
-                </exclusion>
-                <exclusion>
-                    <groupId>com.sun.jersey</groupId>
-                    <artifactId>jersey-core</artifactId>
-                </exclusion>
-            </exclusions>
+            <groupId>jakarta.xml.bind</groupId>
+            <artifactId>jakarta.xml.bind-api</artifactId>
         </dependency>
-        <dependency>
-            <groupId>org.glassfish.grizzly</groupId>
-            <artifactId>grizzly-framework</artifactId>
-            <version>${grizzly.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.glassfish.grizzly</groupId>
-            <artifactId>grizzly-http-server</artifactId>
-            <version>${grizzly.version}</version>
-        </dependency>
-        <dependency>
-            <groupId>org.glassfish.grizzly</groupId>
-            <artifactId>grizzly-http</artifactId>
-            <version>${grizzly.version}</version>
-        </dependency>
+        <!-- End extra Jersey Java 11 jars -->
     </dependencies>
     <build>
         <resources>
@@ -308,9 +179,6 @@
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
-                <configuration>
-                    <maxAllowedViolations>0</maxAllowedViolations>
-                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
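A note on the dependency reshuffle above: the javax.xml.bind and javax.activation packages shipped with the JDK through Java 8 but were removed in Java 11, which is why the jakarta.activation and jakarta.xml.bind-api artifacts are now declared explicitly rather than relying on Jersey to pull them in. A minimal smoke test (the class below is hypothetical, not part of this patch) shows what breaks without them:

// Hypothetical smoke test: verifies that the JAXB and activation classes
// backing Jersey are on the classpath. On a bare Java 11 runtime without the
// explicit dependencies above, both lookups fail.
public class JaxbPresenceCheck {
    public static void main(String[] args) {
        for (String cls : new String[] {"javax.xml.bind.JAXBContext", "javax.activation.DataHandler"}) {
            try {
                Class.forName(cls);
                System.out.println(cls + " is present");
            } catch (ClassNotFoundException e) {
                System.out.println(cls + " is missing; the explicit dependency is required on Java 11");
            }
        }
    }
}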
diff --git a/storm-webapp/src/main/java/org/apache/storm/daemon/logviewer/handler/LogviewerLogSearchHandler.java b/storm-webapp/src/main/java/org/apache/storm/daemon/logviewer/handler/LogviewerLogSearchHandler.java
index fdec017..1feb6d5 100644
--- a/storm-webapp/src/main/java/org/apache/storm/daemon/logviewer/handler/LogviewerLogSearchHandler.java
+++ b/storm-webapp/src/main/java/org/apache/storm/daemon/logviewer/handler/LogviewerLogSearchHandler.java
@@ -798,7 +798,7 @@
         private Integer newByteOffset;
         private byte[] newBeforeBytes;
 
-        public SubstringSearchResult(List<Map<String, Object>> matches, Integer newByteOffset, byte[] newBeforeBytes) {
+        SubstringSearchResult(List<Map<String, Object>> matches, Integer newByteOffset, byte[] newBeforeBytes) {
             this.matches = matches;
             this.newByteOffset = newByteOffset;
             this.newBeforeBytes = newBeforeBytes;
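On the constructor change above: SubstringSearchResult is a nested helper, and a public constructor on a class that is not itself publicly accessible grants no extra access, so the modifier is just noise (presumably what the checkstyle tightening elsewhere in this PR flags). A stripped-down illustration with hypothetical names:

// Hypothetical illustration of the visibility fix: Result is only reachable
// inside Outer, so a public constructor would add nothing.
public class Outer {
    private static class Result {
        private final int offset;

        Result(int offset) { // package-private is sufficient here
            this.offset = offset;
        }
    }

    public static void main(String[] args) {
        Result r = new Result(42); // still constructible from the enclosing class
        System.out.println(r.offset);
    }
}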
diff --git a/storm-webapp/src/main/java/org/apache/storm/daemon/ui/UIHelpers.java b/storm-webapp/src/main/java/org/apache/storm/daemon/ui/UIHelpers.java
index 3f4595c..4d37b64 100644
--- a/storm-webapp/src/main/java/org/apache/storm/daemon/ui/UIHelpers.java
+++ b/storm-webapp/src/main/java/org/apache/storm/daemon/ui/UIHelpers.java
@@ -526,7 +526,6 @@
     private static final AtomicReference<List<Map<String, String>>> MEMORIZED_VERSIONS = new AtomicReference<>();
     private static final AtomicReference<Map<String, String>> MEMORIZED_FULL_VERSION = new AtomicReference<>();
 
-
     private static Map<String, String> toJsonStruct(IVersionInfo info) {
         Map<String, String> ret = new HashMap<>();
         ret.put("version", info.getVersion());
@@ -880,6 +879,7 @@
         result.put("id", supervisorSummary.get_supervisor_id());
         result.put("host", supervisorSummary.get_host());
         result.put("uptime", UIHelpers.prettyUptimeSec(supervisorSummary.get_uptime_secs()));
+        result.put("blacklisted", supervisorSummary.is_blacklisted());
         result.put("uptimeSeconds", supervisorSummary.get_uptime_secs());
         result.put("slotsTotal", supervisorSummary.get_num_workers());
         result.put("slotsUsed", supervisorSummary.get_num_used_workers());
@@ -1022,7 +1022,7 @@
      */
     private static List<Map> getSupervisorsMap(List<SupervisorSummary> supervisors,
                                                Map<String, Object> config) {
-        List<Map> supervisorMaps = new ArrayList();
+        List<Map> supervisorMaps = new ArrayList<>();
         for (SupervisorSummary supervisorSummary : supervisors) {
             supervisorMaps.add(getPrettifiedSupervisorMap(supervisorSummary, config));
         }
@@ -1051,7 +1051,7 @@
      */
     public static Map<String, Object> getSupervisorPageInfo(
             SupervisorPageInfo supervisorPageInfo, Map<String,Object> config) {
-        Map<String, Object> result = new HashMap();
+        Map<String, Object> result = new HashMap<>();
         result.put("workers", getWorkerSummaries(supervisorPageInfo, config));
         result.put("schedulerDisplayResource", config.get(DaemonConfig.SCHEDULER_DISPLAY_RESOURCE));
         List<Map> supervisorMaps = getSupervisorsMap(supervisorPageInfo.get_supervisor_summaries(), config);
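The UIHelpers hunks do two independent things: surface the supervisor's blacklisted flag in the map the UI templates render, and replace raw-type constructors with the diamond form so element types are inferred instead of erased. A sketch of both, assuming only the accessors visible in the hunks (the SupervisorSummary stand-in below is hypothetical):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SupervisorMapSketch {
    // Hypothetical stand-in for the Thrift-generated SupervisorSummary.
    interface SupervisorSummary {
        String get_supervisor_id();
        String get_host();
        boolean is_blacklisted();
    }

    static Map<String, Object> prettify(SupervisorSummary s) {
        Map<String, Object> result = new HashMap<>(); // diamond form, as in the patch
        result.put("id", s.get_supervisor_id());
        result.put("host", s.get_host());
        result.put("blacklisted", s.is_blacklisted()); // the newly surfaced flag
        return result;
    }

    public static void main(String[] args) {
        List<Map<String, Object>> supervisorMaps = new ArrayList<>();
        // supervisorMaps.add(prettify(summary)) for each summary, mirroring getSupervisorsMap
        System.out.println(supervisorMaps);
    }
}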
diff --git a/storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/templates/index-page-template.html b/storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/templates/index-page-template.html
index 027333b..a60e578 100644
--- a/storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/templates/index-page-template.html
+++ b/storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/templates/index-page-template.html
@@ -419,6 +419,11 @@
           Version
         </span>
       </th>
+      <th>
+        <span data-toggle="tooltip" data-placement="top" title="Whether this supervisor is blacklisted">
+          Blacklisted
+        </span>
+      </th>
     </tr>
   </thead>
   <tbody>
@@ -441,6 +446,7 @@
       <td>{{availCpu}}</td>
       {{/schedulerDisplayResource}}
       <td>{{version}}</td>
+      <td>{{blacklisted}}</td>
     </tr>
     {{/supervisors}}
   </tbody>
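The new Blacklisted column reads the blacklisted key straight out of the per-supervisor map built above; the template engine only stringifies it. A toy illustration of that data flow, with a hand-rolled substitution standing in for the real template engine (the names and values below are made up):

import java.util.Map;

public class TemplateSketch {
    public static void main(String[] args) {
        Map<String, Object> supervisor = Map.of(
                "version", "2.2.0",
                "blacklisted", true);
        // Stand-in for the {{version}} / {{blacklisted}} expansion in the new <td> cells.
        String row = "<td>{{version}}</td><td>{{blacklisted}}</td>"
                .replace("{{version}}", String.valueOf(supervisor.get("version")))
                .replace("{{blacklisted}}", String.valueOf(supervisor.get("blacklisted")));
        System.out.println(row); // prints: <td>2.2.0</td><td>true</td>
    }
}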
diff --git a/storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/templates/supervisor-page-template.html b/storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/templates/supervisor-page-template.html
index 43866c6..3475f12 100644
--- a/storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/templates/supervisor-page-template.html
+++ b/storm-webapp/src/main/java/org/apache/storm/daemon/ui/WEB-INF/templates/supervisor-page-template.html
@@ -82,13 +82,18 @@
               Version
             </span>
           </th>
+          <th>
+            <span data-toggle="tooltip" data-placement="top" title="Whether this supervisor is blacklisted">
+              Blacklisted
+            </span>
+          </th>
         </tr>
       </thead>
       <tbody>
       {{#supervisors}}
           <tr>
-            <td><a href="/supervisor.html?host={{host}}">{{host}} (<a href="{{logLink}}" title="View log">log</a>)</a></td>
-            <td><a href="/supervisor.html?id={{id}}">{{id}}</td>
+            <td><a href="/supervisor.html?host={{host}}">{{host}}</a> (<a href="{{logLink}}" title="View log">log</a>)</td>
+            <td><a href="/supervisor.html?id={{id}}">{{id}}</a></td>
             <td>{{uptime}}</td>
             <td>{{slotsTotal}}</td>
             <td>{{slotsUsed}}</td>
@@ -103,6 +108,7 @@
             <td>{{availCpu}}</td>
             {{/schedulerDisplayResource}}
             <td>{{version}}</td>
+            <td>{{blacklisted}}</td>
           </tr>
       {{/supervisors}}
       </tbody>